LLVM  15.0.0git
SparcISelLowering.cpp
Go to the documentation of this file.
1 //===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the interfaces that Sparc uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SparcISelLowering.h"
17 #include "SparcRegisterInfo.h"
18 #include "SparcTargetMachine.h"
19 #include "SparcTargetObjectFile.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringSwitch.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/Module.h"
33 #include "llvm/Support/KnownBits.h"
34 using namespace llvm;
35 
36 
37 //===----------------------------------------------------------------------===//
38 // Calling Convention Implementation
39 //===----------------------------------------------------------------------===//
40 
41 static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
42  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
43  ISD::ArgFlagsTy &ArgFlags, CCState &State)
44 {
45  assert (ArgFlags.isSRet());
46 
47  // Assign SRet argument.
48  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
49  0,
50  LocVT, LocInfo));
51  return true;
52 }
53 
54 static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
55  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
56  ISD::ArgFlagsTy &ArgFlags, CCState &State)
57 {
58  static const MCPhysReg RegList[] = {
59  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
60  };
61  // Try to get first reg.
62  if (Register Reg = State.AllocateReg(RegList)) {
63  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
64  } else {
65  // Assign whole thing in stack.
67  ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
68  return true;
69  }
70 
71  // Try to get second reg.
72  if (Register Reg = State.AllocateReg(RegList))
73  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
74  else
76  ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
77  return true;
78 }
79 
80 static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
81  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
82  ISD::ArgFlagsTy &ArgFlags, CCState &State)
83 {
84  static const MCPhysReg RegList[] = {
85  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
86  };
87 
88  // Try to get first reg.
89  if (Register Reg = State.AllocateReg(RegList))
90  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
91  else
92  return false;
93 
94  // Try to get second reg.
95  if (Register Reg = State.AllocateReg(RegList))
96  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
97  else
98  return false;
99 
100  return true;
101 }
102 
103 // Allocate a full-sized argument for the 64-bit ABI.
104 static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
105  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
106  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
107  assert((LocVT == MVT::f32 || LocVT == MVT::f128
108  || LocVT.getSizeInBits() == 64) &&
109  "Can't handle non-64 bits locations");
110 
111  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
112  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
113  Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
114  unsigned Offset = State.AllocateStack(size, alignment);
115  unsigned Reg = 0;
116 
117  if (LocVT == MVT::i64 && Offset < 6*8)
118  // Promote integers to %i0-%i5.
119  Reg = SP::I0 + Offset/8;
120  else if (LocVT == MVT::f64 && Offset < 16*8)
121  // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
122  Reg = SP::D0 + Offset/8;
123  else if (LocVT == MVT::f32 && Offset < 16*8)
124  // Promote floats to %f1, %f3, ...
125  Reg = SP::F1 + Offset/4;
126  else if (LocVT == MVT::f128 && Offset < 16*8)
127  // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
128  Reg = SP::Q0 + Offset/16;
129 
130  // Promote to register when possible, otherwise use the stack slot.
131  if (Reg) {
132  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
133  return true;
134  }
135 
136  // This argument goes on the stack in an 8-byte slot.
137  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
138  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
139  if (LocVT == MVT::f32)
140  Offset += 4;
141 
142  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
143  return true;
144 }
145 
146 // Allocate a half-sized argument for the 64-bit ABI.
147 //
148 // This is used when passing { float, int } structs by value in registers.
149 static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
150  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
151  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
152  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
153  unsigned Offset = State.AllocateStack(4, Align(4));
154 
155  if (LocVT == MVT::f32 && Offset < 16*8) {
156  // Promote floats to %f0-%f31.
157  State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
158  LocVT, LocInfo));
159  return true;
160  }
161 
162  if (LocVT == MVT::i32 && Offset < 6*8) {
163  // Promote integers to %i0-%i5, using half the register.
164  unsigned Reg = SP::I0 + Offset/8;
165  LocVT = MVT::i64;
166  LocInfo = CCValAssign::AExt;
167 
168  // Set the Custom bit if this i32 goes in the high bits of a register.
169  if (Offset % 8 == 0)
170  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
171  LocVT, LocInfo));
172  else
173  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
174  return true;
175  }
176 
177  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
178  return true;
179 }
180 
181 #include "SparcGenCallingConv.inc"
182 
183 // The calling conventions in SparcCallingConv.td are described in terms of the
184 // callee's register window. This function translates registers to the
185 // corresponding caller window %o register.
186 static unsigned toCallerWindow(unsigned Reg) {
187  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
188  "Unexpected enum");
189  if (Reg >= SP::I0 && Reg <= SP::I7)
190  return Reg - SP::I0 + SP::O0;
191  return Reg;
192 }
193 
194 SDValue
196  bool IsVarArg,
198  const SmallVectorImpl<SDValue> &OutVals,
199  const SDLoc &DL, SelectionDAG &DAG) const {
200  if (Subtarget->is64Bit())
201  return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
202  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
203 }
204 
205 SDValue
207  bool IsVarArg,
209  const SmallVectorImpl<SDValue> &OutVals,
210  const SDLoc &DL, SelectionDAG &DAG) const {
212 
213  // CCValAssign - represent the assignment of the return value to locations.
215 
216  // CCState - Info about the registers and stack slot.
217  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
218  *DAG.getContext());
219 
220  // Analyze return values.
221  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
222 
223  SDValue Flag;
224  SmallVector<SDValue, 4> RetOps(1, Chain);
225  // Make room for the return address offset.
226  RetOps.push_back(SDValue());
227 
228  // Copy the result values into the output registers.
229  for (unsigned i = 0, realRVLocIdx = 0;
230  i != RVLocs.size();
231  ++i, ++realRVLocIdx) {
232  CCValAssign &VA = RVLocs[i];
233  assert(VA.isRegLoc() && "Can only return in registers!");
234 
235  SDValue Arg = OutVals[realRVLocIdx];
236 
237  if (VA.needsCustom()) {
238  assert(VA.getLocVT() == MVT::v2i32);
239  // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
240  // happen by default if this wasn't a legal type)
241 
243  Arg,
244  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
246  Arg,
247  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
248 
249  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
250  Flag = Chain.getValue(1);
251  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
252  VA = RVLocs[++i]; // skip ahead to next loc
253  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
254  Flag);
255  } else
256  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
257 
258  // Guarantee that all emitted copies are stuck together with flags.
259  Flag = Chain.getValue(1);
260  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
261  }
262 
263  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
264  // If the function returns a struct, copy the SRetReturnReg to I0
265  if (MF.getFunction().hasStructRetAttr()) {
267  Register Reg = SFI->getSRetReturnReg();
268  if (!Reg)
269  llvm_unreachable("sret virtual register not created in the entry block");
270  auto PtrVT = getPointerTy(DAG.getDataLayout());
271  SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
272  Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
273  Flag = Chain.getValue(1);
274  RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
275  RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
276  }
277 
278  RetOps[0] = Chain; // Update chain.
279  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
280 
281  // Add the flag if we have it.
282  if (Flag.getNode())
283  RetOps.push_back(Flag);
284 
285  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
286 }
287 
288 // Lower return values for the 64-bit ABI.
289 // Return values are passed the exactly the same way as function arguments.
290 SDValue
292  bool IsVarArg,
294  const SmallVectorImpl<SDValue> &OutVals,
295  const SDLoc &DL, SelectionDAG &DAG) const {
296  // CCValAssign - represent the assignment of the return value to locations.
298 
299  // CCState - Info about the registers and stack slot.
300  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
301  *DAG.getContext());
302 
303  // Analyze return values.
304  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
305 
306  SDValue Flag;
307  SmallVector<SDValue, 4> RetOps(1, Chain);
308 
309  // The second operand on the return instruction is the return address offset.
310  // The return address is always %i7+8 with the 64-bit ABI.
311  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
312 
313  // Copy the result values into the output registers.
314  for (unsigned i = 0; i != RVLocs.size(); ++i) {
315  CCValAssign &VA = RVLocs[i];
316  assert(VA.isRegLoc() && "Can only return in registers!");
317  SDValue OutVal = OutVals[i];
318 
319  // Integer return values must be sign or zero extended by the callee.
320  switch (VA.getLocInfo()) {
321  case CCValAssign::Full: break;
322  case CCValAssign::SExt:
323  OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
324  break;
325  case CCValAssign::ZExt:
326  OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
327  break;
328  case CCValAssign::AExt:
329  OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
330  break;
331  default:
332  llvm_unreachable("Unknown loc info!");
333  }
334 
335  // The custom bit on an i32 return value indicates that it should be passed
336  // in the high bits of the register.
337  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
338  OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
339  DAG.getConstant(32, DL, MVT::i32));
340 
341  // The next value may go in the low bits of the same register.
342  // Handle both at once.
343  if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
344  SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
345  OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
346  // Skip the next value, it's already done.
347  ++i;
348  }
349  }
350 
351  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
352 
353  // Guarantee that all emitted copies are stuck together with flags.
354  Flag = Chain.getValue(1);
355  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
356  }
357 
358  RetOps[0] = Chain; // Update chain.
359 
360  // Add the flag if we have it.
361  if (Flag.getNode())
362  RetOps.push_back(Flag);
363 
364  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
365 }
366 
368  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
370  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
371  if (Subtarget->is64Bit())
372  return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
373  DL, DAG, InVals);
374  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
375  DL, DAG, InVals);
376 }
377 
378 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
379 /// passed in either one or two GPRs, including FP values. TODO: we should
380 /// pass FP values in FP registers for fastcc functions.
382  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
383  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
384  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
386  MachineRegisterInfo &RegInfo = MF.getRegInfo();
388 
389  // Assign locations to all of the incoming arguments.
391  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
392  *DAG.getContext());
393  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
394 
395  const unsigned StackOffset = 92;
396  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
397 
398  unsigned InIdx = 0;
399  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
400  CCValAssign &VA = ArgLocs[i];
401 
402  if (Ins[InIdx].Flags.isSRet()) {
403  if (InIdx != 0)
404  report_fatal_error("sparc only supports sret on the first parameter");
405  // Get SRet from [%fp+64].
406  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
407  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
408  SDValue Arg =
409  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
410  InVals.push_back(Arg);
411  continue;
412  }
413 
414  if (VA.isRegLoc()) {
415  if (VA.needsCustom()) {
416  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
417 
418  Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
419  MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
420  SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
421 
422  assert(i+1 < e);
423  CCValAssign &NextVA = ArgLocs[++i];
424 
425  SDValue LoVal;
426  if (NextVA.isMemLoc()) {
427  int FrameIdx = MF.getFrameInfo().
428  CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
429  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
430  LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
431  } else {
432  Register loReg = MF.addLiveIn(NextVA.getLocReg(),
433  &SP::IntRegsRegClass);
434  LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
435  }
436 
437  if (IsLittleEndian)
438  std::swap(LoVal, HiVal);
439 
440  SDValue WholeValue =
441  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
442  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
443  InVals.push_back(WholeValue);
444  continue;
445  }
446  Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
447  MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
448  SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
449  if (VA.getLocVT() == MVT::f32)
450  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
451  else if (VA.getLocVT() != MVT::i32) {
452  Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
453  DAG.getValueType(VA.getLocVT()));
454  Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
455  }
456  InVals.push_back(Arg);
457  continue;
458  }
459 
460  assert(VA.isMemLoc());
461 
462  unsigned Offset = VA.getLocMemOffset()+StackOffset;
463  auto PtrVT = getPointerTy(DAG.getDataLayout());
464 
465  if (VA.needsCustom()) {
466  assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
467  // If it is double-word aligned, just load.
468  if (Offset % 8 == 0) {
469  int FI = MF.getFrameInfo().CreateFixedObject(8,
470  Offset,
471  true);
472  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
473  SDValue Load =
474  DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
475  InVals.push_back(Load);
476  continue;
477  }
478 
479  int FI = MF.getFrameInfo().CreateFixedObject(4,
480  Offset,
481  true);
482  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
483  SDValue HiVal =
484  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
485  int FI2 = MF.getFrameInfo().CreateFixedObject(4,
486  Offset+4,
487  true);
488  SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
489 
490  SDValue LoVal =
491  DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
492 
493  if (IsLittleEndian)
494  std::swap(LoVal, HiVal);
495 
496  SDValue WholeValue =
497  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
498  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
499  InVals.push_back(WholeValue);
500  continue;
501  }
502 
503  int FI = MF.getFrameInfo().CreateFixedObject(4,
504  Offset,
505  true);
506  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
507  SDValue Load ;
508  if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
509  Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
510  } else if (VA.getValVT() == MVT::f128) {
511  report_fatal_error("SPARCv8 does not handle f128 in calls; "
512  "pass indirectly");
513  } else {
514  // We shouldn't see any other value types here.
515  llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
516  }
517  InVals.push_back(Load);
518  }
519 
520  if (MF.getFunction().hasStructRetAttr()) {
521  // Copy the SRet Argument to SRetReturnReg.
523  Register Reg = SFI->getSRetReturnReg();
524  if (!Reg) {
525  Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
526  SFI->setSRetReturnReg(Reg);
527  }
528  SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
529  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
530  }
531 
532  // Store remaining ArgRegs to the stack if this is a varargs function.
533  if (isVarArg) {
534  static const MCPhysReg ArgRegs[] = {
535  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
536  };
537  unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
538  const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
539  unsigned ArgOffset = CCInfo.getNextStackOffset();
540  if (NumAllocated == 6)
541  ArgOffset += StackOffset;
542  else {
543  assert(!ArgOffset);
544  ArgOffset = 68+4*NumAllocated;
545  }
546 
547  // Remember the vararg offset for the va_start implementation.
548  FuncInfo->setVarArgsFrameOffset(ArgOffset);
549 
550  std::vector<SDValue> OutChains;
551 
552  for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
553  Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
554  MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
555  SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
556 
557  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
558  true);
559  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
560 
561  OutChains.push_back(
562  DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
563  ArgOffset += 4;
564  }
565 
566  if (!OutChains.empty()) {
567  OutChains.push_back(Chain);
568  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
569  }
570  }
571 
572  return Chain;
573 }
574 
575 // Lower formal arguments for the 64 bit ABI.
577  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
579  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
581 
582  // Analyze arguments according to CC_Sparc64.
584  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
585  *DAG.getContext());
586  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
587 
588  // The argument array begins at %fp+BIAS+128, after the register save area.
589  const unsigned ArgArea = 128;
590 
591  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
592  CCValAssign &VA = ArgLocs[i];
593  if (VA.isRegLoc()) {
594  // This argument is passed in a register.
595  // All integer register arguments are promoted by the caller to i64.
596 
597  // Create a virtual register for the promoted live-in value.
598  Register VReg = MF.addLiveIn(VA.getLocReg(),
599  getRegClassFor(VA.getLocVT()));
600  SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
601 
602  // Get the high bits for i32 struct elements.
603  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
604  Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
605  DAG.getConstant(32, DL, MVT::i32));
606 
607  // The caller promoted the argument, so insert an Assert?ext SDNode so we
608  // won't promote the value again in this function.
609  switch (VA.getLocInfo()) {
610  case CCValAssign::SExt:
611  Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
612  DAG.getValueType(VA.getValVT()));
613  break;
614  case CCValAssign::ZExt:
615  Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
616  DAG.getValueType(VA.getValVT()));
617  break;
618  default:
619  break;
620  }
621 
622  // Truncate the register down to the argument type.
623  if (VA.isExtInLoc())
624  Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
625 
626  InVals.push_back(Arg);
627  continue;
628  }
629 
630  // The registers are exhausted. This argument was passed on the stack.
631  assert(VA.isMemLoc());
632  // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
633  // beginning of the arguments area at %fp+BIAS+128.
634  unsigned Offset = VA.getLocMemOffset() + ArgArea;
635  unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
636  // Adjust offset for extended arguments, SPARC is big-endian.
637  // The caller will have written the full slot with extended bytes, but we
638  // prefer our own extending loads.
639  if (VA.isExtInLoc())
640  Offset += 8 - ValSize;
641  int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
642  InVals.push_back(
643  DAG.getLoad(VA.getValVT(), DL, Chain,
646  }
647 
648  if (!IsVarArg)
649  return Chain;
650 
651  // This function takes variable arguments, some of which may have been passed
652  // in registers %i0-%i5. Variable floating point arguments are never passed
653  // in floating point registers. They go on %i0-%i5 or on the stack like
654  // integer arguments.
655  //
656  // The va_start intrinsic needs to know the offset to the first variable
657  // argument.
658  unsigned ArgOffset = CCInfo.getNextStackOffset();
660  // Skip the 128 bytes of register save area.
661  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
662  Subtarget->getStackPointerBias());
663 
664  // Save the variable arguments that were passed in registers.
665  // The caller is required to reserve stack space for 6 arguments regardless
666  // of how many arguments were actually passed.
667  SmallVector<SDValue, 8> OutChains;
668  for (; ArgOffset < 6*8; ArgOffset += 8) {
669  Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
670  SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
671  int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
672  auto PtrVT = getPointerTy(MF.getDataLayout());
673  OutChains.push_back(
674  DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
676  }
677 
678  if (!OutChains.empty())
679  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
680 
681  return Chain;
682 }
683 
684 SDValue
686  SmallVectorImpl<SDValue> &InVals) const {
687  if (Subtarget->is64Bit())
688  return LowerCall_64(CLI, InVals);
689  return LowerCall_32(CLI, InVals);
690 }
691 
692 static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
693  const CallBase *Call) {
694  if (Call)
695  return Call->hasFnAttr(Attribute::ReturnsTwice);
696 
697  const Function *CalleeFn = nullptr;
698  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
699  CalleeFn = dyn_cast<Function>(G->getGlobal());
700  } else if (ExternalSymbolSDNode *E =
701  dyn_cast<ExternalSymbolSDNode>(Callee)) {
702  const Function &Fn = DAG.getMachineFunction().getFunction();
703  const Module *M = Fn.getParent();
704  const char *CalleeName = E->getSymbol();
705  CalleeFn = M->getFunction(CalleeName);
706  }
707 
708  if (!CalleeFn)
709  return false;
710  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
711 }
712 
713 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
714 /// for tail call optimization.
716  CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {
717 
718  auto &Outs = CLI.Outs;
719  auto &Caller = MF.getFunction();
720 
721  // Do not tail call opt functions with "disable-tail-calls" attribute.
722  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
723  return false;
724 
725  // Do not tail call opt if the stack is used to pass parameters.
726  if (CCInfo.getNextStackOffset() != 0)
727  return false;
728 
729  // Do not tail call opt if either the callee or caller returns
730  // a struct and the other does not.
731  if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
732  return false;
733 
734  // Byval parameters hand the function a pointer directly into the stack area
735  // we want to reuse during a tail call.
736  for (auto &Arg : Outs)
737  if (Arg.Flags.isByVal())
738  return false;
739 
740  return true;
741 }
742 
743 // Lower a call for the 32-bit ABI.
744 SDValue
746  SmallVectorImpl<SDValue> &InVals) const {
747  SelectionDAG &DAG = CLI.DAG;
748  SDLoc &dl = CLI.DL;
750  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
752  SDValue Chain = CLI.Chain;
753  SDValue Callee = CLI.Callee;
754  bool &isTailCall = CLI.IsTailCall;
755  CallingConv::ID CallConv = CLI.CallConv;
756  bool isVarArg = CLI.IsVarArg;
757 
758  // Analyze operands of the call, assigning locations to each operand.
760  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
761  *DAG.getContext());
762  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
763 
764  isTailCall = isTailCall && IsEligibleForTailCallOptimization(
765  CCInfo, CLI, DAG.getMachineFunction());
766 
767  // Get the size of the outgoing arguments stack space requirement.
768  unsigned ArgsSize = CCInfo.getNextStackOffset();
769 
770  // Keep stack frames 8-byte aligned.
771  ArgsSize = (ArgsSize+7) & ~7;
772 
774 
775  // Create local copies for byval args.
776  SmallVector<SDValue, 8> ByValArgs;
777  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
778  ISD::ArgFlagsTy Flags = Outs[i].Flags;
779  if (!Flags.isByVal())
780  continue;
781 
782  SDValue Arg = OutVals[i];
783  unsigned Size = Flags.getByValSize();
784  Align Alignment = Flags.getNonZeroByValAlign();
785 
786  if (Size > 0U) {
787  int FI = MFI.CreateStackObject(Size, Alignment, false);
788  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
789  SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
790 
791  Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
792  false, // isVolatile,
793  (Size <= 32), // AlwaysInline if size <= 32,
794  false, // isTailCall
796  ByValArgs.push_back(FIPtr);
797  }
798  else {
799  SDValue nullVal;
800  ByValArgs.push_back(nullVal);
801  }
802  }
803 
804  assert(!isTailCall || ArgsSize == 0);
805 
806  if (!isTailCall)
807  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
808 
810  SmallVector<SDValue, 8> MemOpChains;
811 
812  const unsigned StackOffset = 92;
813  bool hasStructRetAttr = false;
814  unsigned SRetArgSize = 0;
815  // Walk the register/memloc assignments, inserting copies/loads.
816  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
817  i != e;
818  ++i, ++realArgIdx) {
819  CCValAssign &VA = ArgLocs[i];
820  SDValue Arg = OutVals[realArgIdx];
821 
822  ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
823 
824  // Use local copy if it is a byval arg.
825  if (Flags.isByVal()) {
826  Arg = ByValArgs[byvalArgIdx++];
827  if (!Arg) {
828  continue;
829  }
830  }
831 
832  // Promote the value if needed.
833  switch (VA.getLocInfo()) {
834  default: llvm_unreachable("Unknown loc info!");
835  case CCValAssign::Full: break;
836  case CCValAssign::SExt:
837  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
838  break;
839  case CCValAssign::ZExt:
840  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
841  break;
842  case CCValAssign::AExt:
843  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
844  break;
845  case CCValAssign::BCvt:
846  Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
847  break;
848  }
849 
850  if (Flags.isSRet()) {
851  assert(VA.needsCustom());
852 
853  if (isTailCall)
854  continue;
855 
856  // store SRet argument in %sp+64
857  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
858  SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
859  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
860  MemOpChains.push_back(
861  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
862  hasStructRetAttr = true;
863  // sret only allowed on first argument
864  assert(Outs[realArgIdx].OrigArgIndex == 0);
865  SRetArgSize =
866  DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
867  continue;
868  }
869 
870  if (VA.needsCustom()) {
871  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
872 
873  if (VA.isMemLoc()) {
874  unsigned Offset = VA.getLocMemOffset() + StackOffset;
875  // if it is double-word aligned, just store.
876  if (Offset % 8 == 0) {
877  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
878  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
879  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
880  MemOpChains.push_back(
881  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
882  continue;
883  }
884  }
885 
886  if (VA.getLocVT() == MVT::f64) {
887  // Move from the float value from float registers into the
888  // integer registers.
889  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
890  Arg = bitcastConstantFPToInt(C, dl, DAG);
891  else
892  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
893  }
894 
896  Arg,
897  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
899  Arg,
900  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
901 
902  if (VA.isRegLoc()) {
903  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
904  assert(i+1 != e);
905  CCValAssign &NextVA = ArgLocs[++i];
906  if (NextVA.isRegLoc()) {
907  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
908  } else {
909  // Store the second part in stack.
910  unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
911  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
912  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
913  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
914  MemOpChains.push_back(
915  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
916  }
917  } else {
918  unsigned Offset = VA.getLocMemOffset() + StackOffset;
919  // Store the first part.
920  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
921  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
922  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
923  MemOpChains.push_back(
924  DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
925  // Store the second part.
926  PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
927  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
928  MemOpChains.push_back(
929  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
930  }
931  continue;
932  }
933 
934  // Arguments that can be passed on register must be kept at
935  // RegsToPass vector
936  if (VA.isRegLoc()) {
937  if (VA.getLocVT() != MVT::f32) {
938  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
939  continue;
940  }
941  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
942  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
943  continue;
944  }
945 
946  assert(VA.isMemLoc());
947 
948  // Create a store off the stack pointer for this argument.
949  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
951  dl);
952  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
953  MemOpChains.push_back(
954  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
955  }
956 
957 
958  // Emit all stores, make sure the occur before any copies into physregs.
959  if (!MemOpChains.empty())
960  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
961 
962  // Build a sequence of copy-to-reg nodes chained together with token
963  // chain and flag operands which copy the outgoing args into registers.
964  // The InFlag in necessary since all emitted instructions must be
965  // stuck together.
966  SDValue InFlag;
967  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
968  Register Reg = RegsToPass[i].first;
969  if (!isTailCall)
971  Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
972  InFlag = Chain.getValue(1);
973  }
974 
975  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
976 
977  // If the callee is a GlobalAddress node (quite common, every direct call is)
978  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
979  // Likewise ExternalSymbol -> TargetExternalSymbol.
982  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
983  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
984  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
985  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
986 
987  // Returns a chain & a flag for retval copy to use
988  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
990  Ops.push_back(Chain);
991  Ops.push_back(Callee);
992  if (hasStructRetAttr)
993  Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
994  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
995  Register Reg = RegsToPass[i].first;
996  if (!isTailCall)
998  Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
999  }
1000 
1001  // Add a register mask operand representing the call-preserved registers.
1002  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1003  const uint32_t *Mask =
1004  ((hasReturnsTwice)
1005  ? TRI->getRTCallPreservedMask(CallConv)
1006  : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
1007  assert(Mask && "Missing call preserved mask for calling convention");
1008  Ops.push_back(DAG.getRegisterMask(Mask));
1009 
1010  if (InFlag.getNode())
1011  Ops.push_back(InFlag);
1012 
1013  if (isTailCall) {
1015  return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
1016  }
1017 
1018  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
1019  InFlag = Chain.getValue(1);
1020 
1021  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
1022  DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
1023  InFlag = Chain.getValue(1);
1024 
1025  // Assign locations to each value returned by this call.
1027  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1028  *DAG.getContext());
1029 
1030  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
1031 
1032  // Copy all of the result registers out of their specified physreg.
1033  for (unsigned i = 0; i != RVLocs.size(); ++i) {
1034  if (RVLocs[i].getLocVT() == MVT::v2i32) {
1035  SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
1036  SDValue Lo = DAG.getCopyFromReg(
1037  Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
1038  Chain = Lo.getValue(1);
1039  InFlag = Lo.getValue(2);
1040  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
1041  DAG.getConstant(0, dl, MVT::i32));
1042  SDValue Hi = DAG.getCopyFromReg(
1043  Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
1044  Chain = Hi.getValue(1);
1045  InFlag = Hi.getValue(2);
1046  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1047  DAG.getConstant(1, dl, MVT::i32));
1048  InVals.push_back(Vec);
1049  } else {
1050  Chain =
1051  DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1052  RVLocs[i].getValVT(), InFlag)
1053  .getValue(1);
1054  InFlag = Chain.getValue(2);
1055  InVals.push_back(Chain.getValue(0));
1056  }
1057  }
1058 
1059  return Chain;
1060 }
1061 
1062 // FIXME? Maybe this could be a TableGen attribute on some registers and
1063 // this table could be generated automatically from RegInfo.
// NOTE(review): this extraction dropped the opening lines of
// SparcTargetLowering::getRegisterByName (the signature and the
// `StringSwitch<Register>(RegName)` initializer) — verify against upstream
// before making non-comment changes here.
1065  const MachineFunction &MF) const {
// Map a textual SPARC register name ("i0".."i7", "o0".."o7", "l0".."l7",
// "g0".."g7") onto the corresponding physical register enumerator.
1067  .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1068  .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1069  .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1070  .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1071  .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1072  .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1073  .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1074  .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1075  .Default(0);
1076 
1077  if (Reg)
1078  return Reg;
1079 
// Unrecognized names are a hard error rather than a silent fallback.
1080  report_fatal_error("Invalid register name global variable");
1081 }
1082 
1083 // Fixup floating point arguments in the ... part of a varargs call.
1084 //
1085 // The SPARC v9 ABI requires that floating point arguments are treated the same
1086 // as integers when calling a varargs function. This does not apply to the
1087 // fixed arguments that are part of the function's prototype.
1088 //
1089 // This function post-processes a CCValAssign array created by
1090 // AnalyzeCallOperands().
// NOTE(review): the `static void fixupVariableFloatArgs(...)` signature line
// was dropped by this extraction — verify against upstream.
1092  ArrayRef<ISD::OutputArg> Outs) {
1093  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1094  const CCValAssign &VA = ArgLocs[i];
1095  MVT ValTy = VA.getLocVT();
1096  // FIXME: What about f32 arguments? C promotes them to f64 when calling
1097  // varargs functions.
1098  if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1099  continue;
1100  // The fixed arguments to a varargs function still go in FP registers.
1101  if (Outs[VA.getValNo()].IsFixed)
1102  continue;
1103 
1104  // This floating point argument should be reassigned.
1105  CCValAssign NewVA;
1106 
// Each f64 occupies one 8-byte argument slot and each f128 two; the FP
// register number relative to the first FP argument register determines
// which slot(s) this value was assigned.
1107  // Determine the offset into the argument array.
1108  Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1109  unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1110  unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1111  assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1112 
1113  if (Offset < 6*8) {
1114  // This argument should go in %i0-%i5.
1115  unsigned IReg = SP::I0 + Offset/8;
1116  if (ValTy == MVT::f64)
1117  // Full register, just bitconvert into i64.
1118  NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
1119  IReg, MVT::i64, CCValAssign::BCvt);
1120  else {
1121  assert(ValTy == MVT::f128 && "Unexpected type!");
1122  // Full register, just bitconvert into i128 -- We will lower this into
1123  // two i64s in LowerCall_64.
1124  NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
1125  IReg, MVT::i128, CCValAssign::BCvt);
1126  }
1127  } else {
1128  // This needs to go to memory, we're out of integer registers.
1129  NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
1130  Offset, VA.getLocVT(), VA.getLocInfo());
1131  }
1132  ArgLocs[i] = NewVA;
1133  }
1134 }
1135 
1136 // Lower a call for the 64-bit ABI.
// Lower an outgoing call under the 64-bit (V9) ABI: analyze operands with
// CC_Sparc64, materialize register/stack argument copies, emit the SPISD::CALL
// node, and copy the results back out of the callee's return registers.
// NOTE(review): several declaration lines (e.g. the function signature, the
// ArgLocs/RegsToPass/Ops/RVLocs SmallVector declarations, and the TF target
// flags computation) were dropped by this extraction — verify against the
// upstream file before making non-comment changes.
1137 SDValue
1139  SmallVectorImpl<SDValue> &InVals) const {
1140  SelectionDAG &DAG = CLI.DAG;
1141  SDLoc DL = CLI.DL;
1142  SDValue Chain = CLI.Chain;
1143  auto PtrVT = getPointerTy(DAG.getDataLayout());
1144 
1145  // Sparc target does not yet support tail call optimization.
1146  CLI.IsTailCall = false;
1147 
1148  // Analyze operands of the call, assigning locations to each operand.
1150  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1151  *DAG.getContext());
1152  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1153 
1154  // Get the size of the outgoing arguments stack space requirement.
1155  // The stack offset computed by CC_Sparc64 includes all arguments.
1156  // Called functions expect 6 argument words to exist in the stack frame, used
1157  // or not.
1158  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
1159 
1160  // Keep stack frames 16-byte aligned.
1161  ArgsSize = alignTo(ArgsSize, 16);
1162 
1163  // Varargs calls require special treatment.
1164  if (CLI.IsVarArg)
1165  fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1166 
1167  // Adjust the stack pointer to make room for the arguments.
1168  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1169  // with more than 6 arguments.
1170  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1171 
1172  // Collect the set of registers to pass to the function and their values.
1173  // This will be emitted as a sequence of CopyToReg nodes glued to the call
1174  // instruction.
1176 
1177  // Collect chains from all the memory operations that copy arguments to the
1178  // stack. They must follow the stack pointer adjustment above and precede the
1179  // call instruction itself.
1180  SmallVector<SDValue, 8> MemOpChains;
1181 
1182  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1183  const CCValAssign &VA = ArgLocs[i];
1184  SDValue Arg = CLI.OutVals[i];
1185 
1186  // Promote the value if needed.
1187  switch (VA.getLocInfo()) {
1188  default:
1189  llvm_unreachable("Unknown location info!");
1190  case CCValAssign::Full:
1191  break;
1192  case CCValAssign::SExt:
1193  Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1194  break;
1195  case CCValAssign::ZExt:
1196  Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1197  break;
1198  case CCValAssign::AExt:
1199  Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1200  break;
1201  case CCValAssign::BCvt:
1202  // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1203  // SPARC does not support i128 natively. Lower it into two i64, see below.
1204  if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1205  || VA.getLocVT() != MVT::i128)
1206  Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1207  break;
1208  }
1209 
1210  if (VA.isRegLoc()) {
1211  if (VA.needsCustom() && VA.getValVT() == MVT::f128
1212  && VA.getLocVT() == MVT::i128) {
// f128 passed as i128: bounce it through the reserved outgoing-argument
// area on the stack so it can be reloaded as two i64 register halves.
1213  // Store and reload into the integer register reg and reg+1.
1214  unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1215  unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1216  SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1217  SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1218  HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1219  SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1220  LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1221 
1222  // Store to %sp+BIAS+128+Offset
1223  SDValue Store =
1224  DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1225  // Load into Reg and Reg+1
1226  SDValue Hi64 =
1227  DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1228  SDValue Lo64 =
1229  DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1230  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
1231  Hi64));
1232  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
1233  Lo64));
1234  continue;
1235  }
1236 
1237  // The custom bit on an i32 return value indicates that it should be
1238  // passed in the high bits of the register.
1239  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1240  Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1241  DAG.getConstant(32, DL, MVT::i32));
1242 
1243  // The next value may go in the low bits of the same register.
1244  // Handle both at once.
1245  if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1246  ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1248  CLI.OutVals[i+1]);
1249  Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1250  // Skip the next value, it's already done.
1251  ++i;
1252  }
1253  }
1254  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
1255  continue;
1256  }
1257 
1258  assert(VA.isMemLoc());
1259 
1260  // Create a store off the stack pointer for this argument.
1261  SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1262  // The argument area starts at %fp+BIAS+128 in the callee frame,
1263  // %sp+BIAS+128 in ours.
1264  SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1265  Subtarget->getStackPointerBias() +
1266  128, DL);
1267  PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1268  MemOpChains.push_back(
1269  DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1270  }
1271 
1272  // Emit all stores, make sure they occur before the call.
1273  if (!MemOpChains.empty())
1274  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1275 
1276  // Build a sequence of CopyToReg nodes glued together with token chain and
1277  // glue operands which copy the outgoing args into registers. The InGlue is
1278  // necessary since all emitted instructions must be stuck together in order
1279  // to pass the live physical registers.
1280  SDValue InGlue;
1281  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1282  Chain = DAG.getCopyToReg(Chain, DL,
1283  RegsToPass[i].first, RegsToPass[i].second, InGlue);
1284  InGlue = Chain.getValue(1);
1285  }
1286 
1287  // If the callee is a GlobalAddress node (quite common, every direct call is)
1288  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1289  // Likewise ExternalSymbol -> TargetExternalSymbol.
1290  SDValue Callee = CLI.Callee;
1291  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1294  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1295  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
1296  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1297  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
1298 
1299  // Build the operands for the call instruction itself.
1301  Ops.push_back(Chain);
1302  Ops.push_back(Callee);
1303  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1304  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1305  RegsToPass[i].second.getValueType()));
1306 
1307  // Add a register mask operand representing the call-preserved registers.
1308  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1309  const uint32_t *Mask =
1310  ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1312  CLI.CallConv));
1313  assert(Mask && "Missing call preserved mask for calling convention");
1314  Ops.push_back(DAG.getRegisterMask(Mask));
1315 
1316  // Make sure the CopyToReg nodes are glued to the call instruction which
1317  // consumes the registers.
1318  if (InGlue.getNode())
1319  Ops.push_back(InGlue);
1320 
1321  // Now the call itself.
1322  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1323  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1324  InGlue = Chain.getValue(1);
1325 
1326  // Revert the stack pointer immediately after the call.
1327  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
1328  DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
1329  InGlue = Chain.getValue(1);
1330 
1331  // Now extract the return values. This is more or less the same as
1332  // LowerFormalArguments_64.
1333 
1334  // Assign locations to each value returned by this call.
1336  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1337  *DAG.getContext());
1338 
1339  // Set inreg flag manually for codegen generated library calls that
1340  // return float.
1341  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1342  CLI.Ins[0].Flags.setInReg();
1343 
1344  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1345 
1346  // Copy all of the result registers out of their specified physreg.
1347  for (unsigned i = 0; i != RVLocs.size(); ++i) {
1348  CCValAssign &VA = RVLocs[i];
1349  unsigned Reg = toCallerWindow(VA.getLocReg());
1350 
1351  // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1352  // reside in the same register in the high and low bits. Reuse the
1353  // CopyFromReg previous node to avoid duplicate copies.
1354  SDValue RV;
1355  if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1356  if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1357  RV = Chain.getValue(0);
1358 
1359  // But usually we'll create a new CopyFromReg for a different register.
1360  if (!RV.getNode()) {
1361  RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1362  Chain = RV.getValue(1);
1363  InGlue = Chain.getValue(2);
1364  }
1365 
1366  // Get the high bits for i32 struct elements.
1367  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1368  RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1369  DAG.getConstant(32, DL, MVT::i32));
1370 
1371  // The callee promoted the return value, so insert an Assert?ext SDNode so
1372  // we won't promote the value again in this function.
1373  switch (VA.getLocInfo()) {
1374  case CCValAssign::SExt:
1375  RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1376  DAG.getValueType(VA.getValVT()));
1377  break;
1378  case CCValAssign::ZExt:
1379  RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1380  DAG.getValueType(VA.getValVT()));
1381  break;
1382  default:
1383  break;
1384  }
1385 
1386  // Truncate the register down to the return value type.
1387  if (VA.isExtInLoc())
1388  RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1389 
1390  InVals.push_back(RV);
1391  }
1392 
1393  return Chain;
1394 }
1395 
1396 //===----------------------------------------------------------------------===//
1397 // TargetLowering Implementation
1398 //===----------------------------------------------------------------------===//
1399 
// Decide how an atomicrmw instruction should be expanded in IR. A 32-bit
// Xchg is left alone because it maps onto the native swap instruction.
// NOTE(review): the function signature and the fallthrough return (original
// line 1405) were dropped by this extraction — verify against upstream.
1401  if (AI->getOperation() == AtomicRMWInst::Xchg &&
1402  AI->getType()->getPrimitiveSizeInBits() == 32)
1403  return AtomicExpansionKind::None; // Uses xchg instruction
1404 
1406 }
1407 
1408 /// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1409 /// condition.
// NOTE(review): the IntCondCCodeToICC signature line was dropped by this
// extraction — verify against upstream.
1411  switch (CC) {
1412  default: llvm_unreachable("Unknown integer condition code!");
1413  case ISD::SETEQ: return SPCC::ICC_E;
1414  case ISD::SETNE: return SPCC::ICC_NE;
1415  case ISD::SETLT: return SPCC::ICC_L;
1416  case ISD::SETGT: return SPCC::ICC_G;
1417  case ISD::SETLE: return SPCC::ICC_LE;
1418  case ISD::SETGE: return SPCC::ICC_GE;
// Unsigned comparisons use the carry-based conditions: unsigned-less-than
// is "carry set" (CS) and unsigned-greater-or-equal is "carry clear" (CC).
1419  case ISD::SETULT: return SPCC::ICC_CS;
1420  case ISD::SETULE: return SPCC::ICC_LEU;
1421  case ISD::SETUGT: return SPCC::ICC_GU;
1422  case ISD::SETUGE: return SPCC::ICC_CC;
1423  }
1424 }
1425 
1426 /// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
1427 /// FCC condition.
// NOTE(review): the FPCondCCodeToFCC signature line was dropped by this
// extraction — verify against upstream.
1429  switch (CC) {
1430  default: llvm_unreachable("Unknown fp condition code!");
// The "don't care" DAG conditions (SETEQ, SETNE, ...) map to the same FCC
// codes as their explicitly ordered/unordered counterparts below.
1431  case ISD::SETEQ:
1432  case ISD::SETOEQ: return SPCC::FCC_E;
1433  case ISD::SETNE:
1434  case ISD::SETUNE: return SPCC::FCC_NE;
1435  case ISD::SETLT:
1436  case ISD::SETOLT: return SPCC::FCC_L;
1437  case ISD::SETGT:
1438  case ISD::SETOGT: return SPCC::FCC_G;
1439  case ISD::SETLE:
1440  case ISD::SETOLE: return SPCC::FCC_LE;
1441  case ISD::SETGE:
1442  case ISD::SETOGE: return SPCC::FCC_GE;
1443  case ISD::SETULT: return SPCC::FCC_UL;
1444  case ISD::SETULE: return SPCC::FCC_ULE;
1445  case ISD::SETUGT: return SPCC::FCC_UG;
1446  case ISD::SETUGE: return SPCC::FCC_UGE;
1447  case ISD::SETUO: return SPCC::FCC_U;
1448  case ISD::SETO: return SPCC::FCC_O;
1449  case ISD::SETONE: return SPCC::FCC_LG;
1450  case ISD::SETUEQ: return SPCC::FCC_UE;
1451  }
1452 }
1453 
// SparcTargetLowering constructor: registers the legal register classes and
// declares, per subtarget feature set, how each generic DAG operation is to
// be handled (Legal / Expand / Custom / Promote) and which runtime library
// names the soft-float / quad-float helpers use.
// NOTE(review): this extraction dropped a large number of lines from this
// constructor (the signature line and most of the setOperationAction /
// setLoadExtAction / setTruncStoreAction calls) — the code below is
// incomplete; verify every gap against the upstream file.
1455  const SparcSubtarget &STI)
1456  : TargetLowering(TM), Subtarget(&STI) {
1457  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
1458 
1459  // Instructions which use registers as conditionals examine all the
1460  // bits (as does the pseudo SELECT_CC expansion). I don't think it
1461  // matters much whether it's ZeroOrOneBooleanContent, or
1462  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1463  // former.
1466 
1467  // Set up the register classes.
1468  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1469  if (!Subtarget->useSoftFloat()) {
1470  addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1471  addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1472  addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1473  }
1474  if (Subtarget->is64Bit()) {
1475  addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1476  } else {
1477  // On 32bit sparc, we define a double-register 32bit register
1478  // class, as well. This is modeled in LLVM as a 2-vector of i32.
1479  addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1480 
1481  // ...but almost all operations must be expanded, so set that as
1482  // the default.
1483  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1485  }
1486  // Truncating/extending stores/loads are also not supported.
1491 
1495 
1498  }
1499  // However, load and store *are* legal.
1504 
1505  // And we need to promote i64 loads/stores into vector load/store
1508 
1509  // Sadly, this doesn't work:
1510  // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1511  // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1512  }
1513 
1514  // Turn FP extload into load/fpextend
1515  for (MVT VT : MVT::fp_valuetypes()) {
1519  }
1520 
1521  // Sparc doesn't have i1 sign extending load
1522  for (MVT VT : MVT::integer_valuetypes())
1524 
1525  // Turn FP truncstore into trunc + store.
1532 
1533  // Custom legalize GlobalAddress nodes into LO/HI parts.
1538 
1539  // Sparc doesn't have sext_inreg, replace them with shl/sra
1543 
1544  // Sparc has no REM or DIVREM operations.
1549 
1550  // ... nor does SparcV9.
1551  if (Subtarget->is64Bit()) {
1556  }
1557 
1558  // Custom expand fp<->sint
1563 
1564  // Custom Expand fp<->uint
1569 
1570  // Lower f16 conversion operations into library calls
1577 
1580 
1581  // Sparc has no select or setcc: expand to SELECT_CC.
1586 
1591 
1592  // Sparc doesn't have BRCOND either, it has BR_CC.
1600 
1605 
1610 
1611  if (Subtarget->is64Bit()) {
1622 
1624  Subtarget->usePopc() ? Legal : Expand);
1631  }
1632 
1633  // ATOMICs.
1634  // Atomics are supported on SparcV9. 32-bit atomics are also
1635  // supported by some Leon SparcV8 variants. Otherwise, atomics
1636  // are unsupported.
1637  if (Subtarget->isV9())
1639  else if (Subtarget->hasLeonCasa())
1641  else
1643 
1645 
1647 
1649 
1650  // Custom Lower Atomic LOAD/STORE
1653 
1654  if (Subtarget->is64Bit()) {
1659  }
1660 
1661  if (!Subtarget->is64Bit()) {
1662  // These libcalls are not available in 32-bit.
1663  setLibcallName(RTLIB::MULO_I64, nullptr);
1664  setLibcallName(RTLIB::SHL_I128, nullptr);
1665  setLibcallName(RTLIB::SRL_I128, nullptr);
1666  setLibcallName(RTLIB::SRA_I128, nullptr);
1667  }
1668 
1669  setLibcallName(RTLIB::MULO_I128, nullptr);
1670 
1671  if (!Subtarget->isV9()) {
1672  // SparcV8 does not have FNEGD and FABSD.
1675  }
1676 
1703 
1707 
1708  // Expands to [SU]MUL_LOHI.
1712 
1713  if (Subtarget->useSoftMulDiv()) {
1714  // .umul works for both signed and unsigned
1717  setLibcallName(RTLIB::MUL_I32, ".umul");
1718 
1720  setLibcallName(RTLIB::SDIV_I32, ".div");
1721 
1723  setLibcallName(RTLIB::UDIV_I32, ".udiv");
1724 
1725  setLibcallName(RTLIB::SREM_I32, ".rem");
1726  setLibcallName(RTLIB::UREM_I32, ".urem");
1727  }
1728 
1729  if (Subtarget->is64Bit()) {
1734 
1737 
1741  }
1742 
1743  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1745  // VAARG needs to be lowered to not do unaligned accesses for doubles.
1747 
1750 
1751  // Use the default implementation.
1757 
1759 
1761  Subtarget->usePopc() ? Legal : Expand);
1762 
1763  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1766  } else {
1769  }
1770 
1771  if (Subtarget->hasHardQuad()) {
1779  if (Subtarget->isV9()) {
1782  } else {
1785  }
1786 
// Hard-quad without 64-bit mode still needs library helpers for the
// f128 <-> 64-bit integer conversions.
1787  if (!Subtarget->is64Bit()) {
1788  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1789  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1790  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1791  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1792  }
1793 
1794  } else {
1795  // Custom legalize f128 operations.
1796 
1804 
1808 
// _Qp_* names are the 64-bit (V9) quad-float runtime; _Q_* names are the
// 32-bit (V8) runtime.
1809  // Setup Runtime library names.
1810  if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1811  setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1812  setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1813  setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1814  setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1815  setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1816  setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1817  setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1818  setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1819  setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1820  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1821  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1822  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1823  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1824  setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1825  setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1826  setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1827  setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1828  } else if (!Subtarget->useSoftFloat()) {
1829  setLibcallName(RTLIB::ADD_F128, "_Q_add");
1830  setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1831  setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1832  setLibcallName(RTLIB::DIV_F128, "_Q_div");
1833  setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1834  setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1835  setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1836  setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1837  setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1838  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1839  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1840  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1841  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1842  setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1843  setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1844  setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1845  setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1846  }
1847  }
1848 
1849  if (Subtarget->fixAllFDIVSQRT()) {
1850  // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1851  // the former instructions generate errata on LEON processors.
1854  }
1855 
1856  if (Subtarget->hasNoFMULS()) {
1858  }
1859 
1860  // Custom combine bitcast between f64 and v2i32
1861  if (!Subtarget->is64Bit())
1863 
1864  if (Subtarget->hasLeonCycleCounter())
1866 
1868 
1870 
1872 }
1873 
// Report whether software floating point is in use; simply forwards the
// subtarget's configuration. NOTE(review): the signature line was dropped
// by this extraction — verify against upstream.
1875  return Subtarget->useSoftFloat();
1876 }
1877 
1878 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1879  switch ((SPISD::NodeType)Opcode) {
1880  case SPISD::FIRST_NUMBER: break;
1881  case SPISD::CMPICC: return "SPISD::CMPICC";
1882  case SPISD::CMPFCC: return "SPISD::CMPFCC";
1883  case SPISD::BRICC: return "SPISD::BRICC";
1884  case SPISD::BRXCC: return "SPISD::BRXCC";
1885  case SPISD::BRFCC: return "SPISD::BRFCC";
1886  case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1887  case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1888  case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1889  case SPISD::Hi: return "SPISD::Hi";
1890  case SPISD::Lo: return "SPISD::Lo";
1891  case SPISD::FTOI: return "SPISD::FTOI";
1892  case SPISD::ITOF: return "SPISD::ITOF";
1893  case SPISD::FTOX: return "SPISD::FTOX";
1894  case SPISD::XTOF: return "SPISD::XTOF";
1895  case SPISD::CALL: return "SPISD::CALL";
1896  case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1897  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1898  case SPISD::FLUSHW: return "SPISD::FLUSHW";
1899  case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1900  case SPISD::TLS_LD: return "SPISD::TLS_LD";
1901  case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
1902  case SPISD::TAIL_CALL: return "SPISD::TAIL_CALL";
1903  case SPISD::LOAD_GDOP: return "SPISD::LOAD_GDOP";
1904  }
1905  return nullptr;
1906 }
1907 
// Result type of a setcc: i32 for scalars. NOTE(review): the signature line
// and the vector-case return (original line 1912) were dropped by this
// extraction — verify against upstream.
1909  EVT VT) const {
1910  if (!VT.isVector())
1911  return MVT::i32;
1913 }
1914 
/// computeKnownBitsForTargetNode - Determine which bits of Op are known to be
/// zero or one. Op is expected to be a target-specific (SPISD) node. Used by
/// the DAG combiner.
/// NOTE(review): the function-name line of the signature was dropped by this
/// extraction — verify against upstream.
1919  (const SDValue Op,
1920  KnownBits &Known,
1921  const APInt &DemandedElts,
1922  const SelectionDAG &DAG,
1923  unsigned Depth) const {
1924  KnownBits Known2;
1925  Known.resetAll();
1926 
1927  switch (Op.getOpcode()) {
1928  default: break;
1929  case SPISD::SELECT_ICC:
1930  case SPISD::SELECT_XCC:
1931  case SPISD::SELECT_FCC:
// A select yields one of its first two operands, so a bit is known only
// when it is known (and identical) in both candidates.
1932  Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
1933  Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
1934 
1935  // Only known if known in both the LHS and RHS.
1936  Known = KnownBits::commonBits(Known, Known2);
1937  break;
1938  }
1939 }
1940 
// Look at LHS/RHS/CC and see if they describe a lowered setcc: a SELECT_*
// node producing the constants 1 and 0 fed by a CMPICC/CMPFCC compare,
// tested against zero with SETNE. If so, replace LHS/RHS with the operands
// of the original compare and set SPCC to its condition code, so the caller
// can branch/select on the original comparison directly.
// NOTE(review): the LookThroughSetCC signature line was dropped by this
// extraction — verify against upstream.
1944  ISD::CondCode CC, unsigned &SPCC) {
1945  if (isNullConstant(RHS) &&
1946  CC == ISD::SETNE &&
1947  (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1948  LHS.getOpcode() == SPISD::SELECT_XCC) &&
1949  LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1950  (LHS.getOpcode() == SPISD::SELECT_FCC &&
1951  LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
1952  isOneConstant(LHS.getOperand(0)) &&
1953  isNullConstant(LHS.getOperand(1))) {
1954  SDValue CMPCC = LHS.getOperand(3);
1955  SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
1956  LHS = CMPCC.getOperand(0);
1957  RHS = CMPCC.getOperand(1);
1958  }
1959 }
1960 
// Convert an address-producing SDNode (GlobalAddress, ConstantPool,
// BlockAddress or ExternalSymbol) into its Target* equivalent, attaching the
// given target flags TF. Any other node kind is a fatal internal error.
// NOTE(review): the withTargetFlags signature line was dropped by this
// extraction — verify against upstream.
1963  SelectionDAG &DAG) const {
1964  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1965  return DAG.getTargetGlobalAddress(GA->getGlobal(),
1966  SDLoc(GA),
1967  GA->getValueType(0),
1968  GA->getOffset(), TF);
1969 
1970  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
1971  return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
1972  CP->getAlign(), CP->getOffset(), TF);
1973 
1974  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
1975  return DAG.getTargetBlockAddress(BA->getBlockAddress(),
1976  Op.getValueType(),
1977  0,
1978  TF);
1979 
1980  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
1981  return DAG.getTargetExternalSymbol(ES->getSymbol(),
1982  ES->getValueType(0), TF);
1983 
1984  llvm_unreachable("Unhandled address SDNode");
1985 }
1986 
// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
// This emits the classic %hi/%lo (sethi + or-style) address materialization,
// with the relocation kind of each half selected by the target flags.
// NOTE(review): the makeHiLoPair signature line was dropped by this
// extraction — verify against upstream.
1992  SDLoc DL(Op);
1993  EVT VT = Op.getValueType();
1994  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
1995  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
1996  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
1997 }
1998 
1999 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2000 // or ExternalSymbol SDNode.
2002  SDLoc DL(Op);
2003  EVT VT = getPointerTy(DAG.getDataLayout());
2004 
2005  // Handle PIC mode first. SPARC needs a got load for every variable!
2006  if (isPositionIndependent()) {
2007  const Module *M = DAG.getMachineFunction().getFunction().getParent();
2008  PICLevel::Level picLevel = M->getPICLevel();
2009  SDValue Idx;
2010 
2011  if (picLevel == PICLevel::SmallPIC) {
2012  // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2013  Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2015  } else {
2016  // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2019  }
2020 
2021  SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2022  SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2023  // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2024  // function has calls.
2026  MFI.setHasCalls(true);
2027  return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2029  }
2030 
2031  // This is one of the absolute code models.
2032  switch(getTargetMachine().getCodeModel()) {
2033  default:
2034  llvm_unreachable("Unsupported absolute code model");
2035  case CodeModel::Small:
2036  // abs32.
2039  case CodeModel::Medium: {
2040  // abs44.
2043  H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2045  L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2046  return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2047  }
2048  case CodeModel::Large: {
2049  // abs64.
2052  Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2055  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2056  }
2057  }
2058 }
2059 
2061  SelectionDAG &DAG) const {
2062  return makeAddress(Op, DAG);
2063 }
2064 
2066  SelectionDAG &DAG) const {
2067  return makeAddress(Op, DAG);
2068 }
2069 
2071  SelectionDAG &DAG) const {
2072  return makeAddress(Op, DAG);
2073 }
2074 
2076  SelectionDAG &DAG) const {
2077 
2078  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2079  if (DAG.getTarget().useEmulatedTLS())
2080  return LowerToTLSEmulatedModel(GA, DAG);
2081 
2082  SDLoc DL(GA);
2083  const GlobalValue *GV = GA->getGlobal();
2084  EVT PtrVT = getPointerTy(DAG.getDataLayout());
2085 
2087 
2089  unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2092  unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2095  unsigned addTF = ((model == TLSModel::GeneralDynamic)
2098  unsigned callTF = ((model == TLSModel::GeneralDynamic)
2101 
2102  SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2103  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2104  SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2105  withTargetFlags(Op, addTF, DAG));
2106 
2107  SDValue Chain = DAG.getEntryNode();
2108  SDValue InFlag;
2109 
2110  Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
2111  Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
2112  InFlag = Chain.getValue(1);
2113  SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2114  SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2115 
2116  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2117  const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2119  assert(Mask && "Missing call preserved mask for calling convention");
2120  SDValue Ops[] = {Chain,
2121  Callee,
2122  Symbol,
2123  DAG.getRegister(SP::O0, PtrVT),
2124  DAG.getRegisterMask(Mask),
2125  InFlag};
2126  Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2127  InFlag = Chain.getValue(1);
2128  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
2129  DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
2130  InFlag = Chain.getValue(1);
2131  SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
2132 
2134  return Ret;
2135 
2136  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2138  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2140  HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2141  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2143  }
2144 
2145  if (model == TLSModel::InitialExec) {
2146  unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2148 
2149  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2150 
2151  // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2152  // function has calls.
2154  MFI.setHasCalls(true);
2155 
2156  SDValue TGA = makeHiLoPair(Op,
2159  SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2160  SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2161  DL, PtrVT, Ptr,
2162  withTargetFlags(Op, ldTF, DAG));
2163  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2164  DAG.getRegister(SP::G7, PtrVT), Offset,
2167  }
2168 
2170  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2172  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2174  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2175 
2176  return DAG.getNode(ISD::ADD, DL, PtrVT,
2177  DAG.getRegister(SP::G7, PtrVT), Offset);
2178 }
2179 
2182  const SDLoc &DL,
2183  SelectionDAG &DAG) const {
2185  EVT ArgVT = Arg.getValueType();
2186  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2187 
2188  ArgListEntry Entry;
2189  Entry.Node = Arg;
2190  Entry.Ty = ArgTy;
2191 
2192  if (ArgTy->isFP128Ty()) {
2193  // Create a stack object and pass the pointer to the library function.
2194  int FI = MFI.CreateStackObject(16, Align(8), false);
2195  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2196  Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2197  Align(8));
2198 
2199  Entry.Node = FIPtr;
2200  Entry.Ty = PointerType::getUnqual(ArgTy);
2201  }
2202  Args.push_back(Entry);
2203  return Chain;
2204 }
2205 
2206 SDValue
2208  const char *LibFuncName,
2209  unsigned numArgs) const {
2210 
2211  ArgListTy Args;
2212 
2214  auto PtrVT = getPointerTy(DAG.getDataLayout());
2215 
2216  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2217  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2218  Type *RetTyABI = RetTy;
2219  SDValue Chain = DAG.getEntryNode();
2220  SDValue RetPtr;
2221 
2222  if (RetTy->isFP128Ty()) {
2223  // Create a Stack Object to receive the return value of type f128.
2224  ArgListEntry Entry;
2225  int RetFI = MFI.CreateStackObject(16, Align(8), false);
2226  RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2227  Entry.Node = RetPtr;
2228  Entry.Ty = PointerType::getUnqual(RetTy);
2229  if (!Subtarget->is64Bit()) {
2230  Entry.IsSRet = true;
2231  Entry.IndirectType = RetTy;
2232  }
2233  Entry.IsReturned = false;
2234  Args.push_back(Entry);
2235  RetTyABI = Type::getVoidTy(*DAG.getContext());
2236  }
2237 
2238  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2239  for (unsigned i = 0, e = numArgs; i != e; ++i) {
2240  Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2241  }
2243  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2244  .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2245 
2246  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2247 
2248  // chain is in second result.
2249  if (RetTyABI == RetTy)
2250  return CallInfo.first;
2251 
2252  assert (RetTy->isFP128Ty() && "Unexpected return type!");
2253 
2254  Chain = CallInfo.second;
2255 
2256  // Load RetPtr to get the return value.
2257  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2258  MachinePointerInfo(), Align(8));
2259 }
2260 
2262  unsigned &SPCC, const SDLoc &DL,
2263  SelectionDAG &DAG) const {
2264 
2265  const char *LibCall = nullptr;
2266  bool is64Bit = Subtarget->is64Bit();
2267  switch(SPCC) {
2268  default: llvm_unreachable("Unhandled conditional code!");
2269  case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2270  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2271  case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2272  case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2273  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2274  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2275  case SPCC::FCC_UL :
2276  case SPCC::FCC_ULE:
2277  case SPCC::FCC_UG :
2278  case SPCC::FCC_UGE:
2279  case SPCC::FCC_U :
2280  case SPCC::FCC_O :
2281  case SPCC::FCC_LG :
2282  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2283  }
2284 
2285  auto PtrVT = getPointerTy(DAG.getDataLayout());
2286  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2287  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2288  ArgListTy Args;
2289  SDValue Chain = DAG.getEntryNode();
2290  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2291  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2292 
2294  CLI.setDebugLoc(DL).setChain(Chain)
2296 
2297  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2298 
2299  // result is in first, and chain is in second result.
2300  SDValue Result = CallInfo.first;
2301 
2302  switch(SPCC) {
2303  default: {
2304  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2305  SPCC = SPCC::ICC_NE;
2306  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2307  }
2308  case SPCC::FCC_UL : {
2309  SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2310  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2311  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2312  SPCC = SPCC::ICC_NE;
2313  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2314  }
2315  case SPCC::FCC_ULE: {
2316  SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2317  SPCC = SPCC::ICC_NE;
2318  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2319  }
2320  case SPCC::FCC_UG : {
2321  SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2322  SPCC = SPCC::ICC_G;
2323  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2324  }
2325  case SPCC::FCC_UGE: {
2326  SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2327  SPCC = SPCC::ICC_NE;
2328  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2329  }
2330 
2331  case SPCC::FCC_U : {
2332  SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2333  SPCC = SPCC::ICC_E;
2334  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2335  }
2336  case SPCC::FCC_O : {
2337  SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2338  SPCC = SPCC::ICC_NE;
2339  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2340  }
2341  case SPCC::FCC_LG : {
2342  SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2343  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2344  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2345  SPCC = SPCC::ICC_NE;
2346  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2347  }
2348  case SPCC::FCC_UE : {
2349  SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2350  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2351  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2352  SPCC = SPCC::ICC_E;
2353  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2354  }
2355  }
2356 }
2357 
2358 static SDValue
2360  const SparcTargetLowering &TLI) {
2361 
2362  if (Op.getOperand(0).getValueType() == MVT::f64)
2363  return TLI.LowerF128Op(Op, DAG,
2364  TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2365 
2366  if (Op.getOperand(0).getValueType() == MVT::f32)
2367  return TLI.LowerF128Op(Op, DAG,
2368  TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2369 
2370  llvm_unreachable("fpextend with non-float operand!");
2371  return SDValue();
2372 }
2373 
2374 static SDValue
2376  const SparcTargetLowering &TLI) {
2377  // FP_ROUND on f64 and f32 are legal.
2378  if (Op.getOperand(0).getValueType() != MVT::f128)
2379  return Op;
2380 
2381  if (Op.getValueType() == MVT::f64)
2382  return TLI.LowerF128Op(Op, DAG,
2383  TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2384  if (Op.getValueType() == MVT::f32)
2385  return TLI.LowerF128Op(Op, DAG,
2386  TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2387 
2388  llvm_unreachable("fpround to non-float!");
2389  return SDValue();
2390 }
2391 
2393  const SparcTargetLowering &TLI,
2394  bool hasHardQuad) {
2395  SDLoc dl(Op);
2396  EVT VT = Op.getValueType();
2397  assert(VT == MVT::i32 || VT == MVT::i64);
2398 
2399  // Expand f128 operations to fp128 abi calls.
2400  if (Op.getOperand(0).getValueType() == MVT::f128
2401  && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2402  const char *libName = TLI.getLibcallName(VT == MVT::i32
2403  ? RTLIB::FPTOSINT_F128_I32
2404  : RTLIB::FPTOSINT_F128_I64);
2405  return TLI.LowerF128Op(Op, DAG, libName, 1);
2406  }
2407 
2408  // Expand if the resulting type is illegal.
2409  if (!TLI.isTypeLegal(VT))
2410  return SDValue();
2411 
2412  // Otherwise, Convert the fp value to integer in an FP register.
2413  if (VT == MVT::i32)
2414  Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2415  else
2416  Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2417 
2418  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2419 }
2420 
2422  const SparcTargetLowering &TLI,
2423  bool hasHardQuad) {
2424  SDLoc dl(Op);
2425  EVT OpVT = Op.getOperand(0).getValueType();
2426  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2427 
2428  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2429 
2430  // Expand f128 operations to fp128 ABI calls.
2431  if (Op.getValueType() == MVT::f128
2432  && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2433  const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2434  ? RTLIB::SINTTOFP_I32_F128
2435  : RTLIB::SINTTOFP_I64_F128);
2436  return TLI.LowerF128Op(Op, DAG, libName, 1);
2437  }
2438 
2439  // Expand if the operand type is illegal.
2440  if (!TLI.isTypeLegal(OpVT))
2441  return SDValue();
2442 
2443  // Otherwise, Convert the int value to FP in an FP register.
2444  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2445  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2446  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2447 }
2448 
2450  const SparcTargetLowering &TLI,
2451  bool hasHardQuad) {
2452  SDLoc dl(Op);
2453  EVT VT = Op.getValueType();
2454 
2455  // Expand if it does not involve f128 or the target has support for
2456  // quad floating point instructions and the resulting type is legal.
2457  if (Op.getOperand(0).getValueType() != MVT::f128 ||
2458  (hasHardQuad && TLI.isTypeLegal(VT)))
2459  return SDValue();
2460 
2461  assert(VT == MVT::i32 || VT == MVT::i64);
2462 
2463  return TLI.LowerF128Op(Op, DAG,
2464  TLI.getLibcallName(VT == MVT::i32
2465  ? RTLIB::FPTOUINT_F128_I32
2466  : RTLIB::FPTOUINT_F128_I64),
2467  1);
2468 }
2469 
2471  const SparcTargetLowering &TLI,
2472  bool hasHardQuad) {
2473  SDLoc dl(Op);
2474  EVT OpVT = Op.getOperand(0).getValueType();
2475  assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2476 
2477  // Expand if it does not involve f128 or the target has support for
2478  // quad floating point instructions and the operand type is legal.
2479  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2480  return SDValue();
2481 
2482  return TLI.LowerF128Op(Op, DAG,
2483  TLI.getLibcallName(OpVT == MVT::i32
2484  ? RTLIB::UINTTOFP_I32_F128
2485  : RTLIB::UINTTOFP_I64_F128),
2486  1);
2487 }
2488 
2490  const SparcTargetLowering &TLI,
2491  bool hasHardQuad) {
2492  SDValue Chain = Op.getOperand(0);
2493  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2494  SDValue LHS = Op.getOperand(2);
2495  SDValue RHS = Op.getOperand(3);
2496  SDValue Dest = Op.getOperand(4);
2497  SDLoc dl(Op);
2498  unsigned Opc, SPCC = ~0U;
2499 
2500  // If this is a br_cc of a "setcc", and if the setcc got lowered into
2501  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2502  LookThroughSetCC(LHS, RHS, CC, SPCC);
2503 
2504  // Get the condition flag.
2505  SDValue CompareFlag;
2506  if (LHS.getValueType().isInteger()) {
2507  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2508  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2509  // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2510  Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
2511  } else {
2512  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2513  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2514  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2515  Opc = SPISD::BRICC;
2516  } else {
2517  CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2518  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2519  Opc = SPISD::BRFCC;
2520  }
2521  }
2522  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2523  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2524 }
2525 
2527  const SparcTargetLowering &TLI,
2528  bool hasHardQuad) {
2529  SDValue LHS = Op.getOperand(0);
2530  SDValue RHS = Op.getOperand(1);
2531  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2532  SDValue TrueVal = Op.getOperand(2);
2533  SDValue FalseVal = Op.getOperand(3);
2534  SDLoc dl(Op);
2535  unsigned Opc, SPCC = ~0U;
2536 
2537  // If this is a select_cc of a "setcc", and if the setcc got lowered into
2538  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2539  LookThroughSetCC(LHS, RHS, CC, SPCC);
2540 
2541  SDValue CompareFlag;
2542  if (LHS.getValueType().isInteger()) {
2543  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2544  Opc = LHS.getValueType() == MVT::i32 ?
2546  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2547  } else {
2548  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2549  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2550  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2551  Opc = SPISD::SELECT_ICC;
2552  } else {
2553  CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2554  Opc = SPISD::SELECT_FCC;
2555  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2556  }
2557  }
2558  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2559  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2560 }
2561 
2563  const SparcTargetLowering &TLI) {
2564  MachineFunction &MF = DAG.getMachineFunction();
2566  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2567 
2568  // Need frame address to find the address of VarArgsFrameIndex.
2570 
2571  // vastart just stores the address of the VarArgsFrameIndex slot into the
2572  // memory location argument.
2573  SDLoc DL(Op);
2574  SDValue Offset =
2575  DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2576  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2577  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2578  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2579  MachinePointerInfo(SV));
2580 }
2581 
2583  SDNode *Node = Op.getNode();
2584  EVT VT = Node->getValueType(0);
2585  SDValue InChain = Node->getOperand(0);
2586  SDValue VAListPtr = Node->getOperand(1);
2587  EVT PtrVT = VAListPtr.getValueType();
2588  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2589  SDLoc DL(Node);
2590  SDValue VAList =
2591  DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2592  // Increment the pointer, VAList, to the next vaarg.
2593  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2594  DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2595  DL));
2596  // Store the incremented VAList to the legalized pointer.
2597  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2598  MachinePointerInfo(SV));
2599  // Load the actual argument out of the pointer VAList.
2600  // We can't count on greater alignment than the word size.
2601  return DAG.getLoad(
2602  VT, DL, InChain, VAList, MachinePointerInfo(),
2603  std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8);
2604 }
2605 
2607  const SparcSubtarget *Subtarget) {
2608  SDValue Chain = Op.getOperand(0); // Legalize the chain.
2609  SDValue Size = Op.getOperand(1); // Legalize the size.
2610  MaybeAlign Alignment =
2611  cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
2612  Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
2613  EVT VT = Size->getValueType(0);
2614  SDLoc dl(Op);
2615 
2616  // TODO: implement over-aligned alloca. (Note: also implies
2617  // supporting support for overaligned function frames + dynamic
2618  // allocations, at all, which currently isn't supported)
2619  if (Alignment && *Alignment > StackAlign) {
2620  const MachineFunction &MF = DAG.getMachineFunction();
2621  report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2622  "over-aligned dynamic alloca not supported.");
2623  }
2624 
2625  // The resultant pointer needs to be above the register spill area
2626  // at the bottom of the stack.
2627  unsigned regSpillArea;
2628  if (Subtarget->is64Bit()) {
2629  regSpillArea = 128;
2630  } else {
2631  // On Sparc32, the size of the spill area is 92. Unfortunately,
2632  // that's only 4-byte aligned, not 8-byte aligned (the stack
2633  // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2634  // aligned dynamic allocation, we actually need to add 96 to the
2635  // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2636 
2637  // That also means adding 4 to the size of the allocation --
2638  // before applying the 8-byte rounding. Unfortunately, we the
2639  // value we get here has already had rounding applied. So, we need
2640  // to add 8, instead, wasting a bit more memory.
2641 
2642  // Further, this only actually needs to be done if the required
2643  // alignment is > 4, but, we've lost that info by this point, too,
2644  // so we always apply it.
2645 
2646  // (An alternative approach would be to always reserve 96 bytes
2647  // instead of the required 92, but then we'd waste 4 extra bytes
2648  // in every frame, not just those with dynamic stack allocations)
2649 
2650  // TODO: modify code in SelectionDAGBuilder to make this less sad.
2651 
2652  Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2653  DAG.getConstant(8, dl, VT));
2654  regSpillArea = 96;
2655  }
2656 
2657  unsigned SPReg = SP::O6;
2658  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2659  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2660  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2661 
2662  regSpillArea += Subtarget->getStackPointerBias();
2663 
2664  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2665  DAG.getConstant(regSpillArea, dl, VT));
2666  SDValue Ops[2] = { NewVal, Chain };
2667  return DAG.getMergeValues(Ops, dl);
2668 }
2669 
2670 
2672  SDLoc dl(Op);
2673  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2674  dl, MVT::Other, DAG.getEntryNode());
2675  return Chain;
2676 }
2677 
2679  const SparcSubtarget *Subtarget,
2680  bool AlwaysFlush = false) {
2682  MFI.setFrameAddressIsTaken(true);
2683 
2684  EVT VT = Op.getValueType();
2685  SDLoc dl(Op);
2686  unsigned FrameReg = SP::I6;
2687  unsigned stackBias = Subtarget->getStackPointerBias();
2688 
2689  SDValue FrameAddr;
2690  SDValue Chain;
2691 
2692  // flush first to make sure the windowed registers' values are in stack
2693  Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2694 
2695  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2696 
2697  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2698 
2699  while (depth--) {
2700  SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2701  DAG.getIntPtrConstant(Offset, dl));
2702  FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2703  }
2704  if (Subtarget->is64Bit())
2705  FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2706  DAG.getIntPtrConstant(stackBias, dl));
2707  return FrameAddr;
2708 }
2709 
2710 
2712  const SparcSubtarget *Subtarget) {
2713 
2714  uint64_t depth = Op.getConstantOperandVal(0);
2715 
2716  return getFRAMEADDR(depth, Op, DAG, Subtarget);
2717 
2718 }
2719 
2721  const SparcTargetLowering &TLI,
2722  const SparcSubtarget *Subtarget) {
2723  MachineFunction &MF = DAG.getMachineFunction();
2724  MachineFrameInfo &MFI = MF.getFrameInfo();
2725  MFI.setReturnAddressIsTaken(true);
2726 
2728  return SDValue();
2729 
2730  EVT VT = Op.getValueType();
2731  SDLoc dl(Op);
2732  uint64_t depth = Op.getConstantOperandVal(0);
2733 
2734  SDValue RetAddr;
2735  if (depth == 0) {
2736  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2737  Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2738  RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2739  return RetAddr;
2740  }
2741 
2742  // Need frame address to find return address of the caller.
2743  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2744 
2745  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2746  SDValue Ptr = DAG.getNode(ISD::ADD,
2747  dl, VT,
2748  FrameAddr,
2749  DAG.getIntPtrConstant(Offset, dl));
2750  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2751 
2752  return RetAddr;
2753 }
2754 
2755 static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2756  unsigned opcode) {
2757  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2758  assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2759 
2760  // Lower fneg/fabs on f64 to fneg/fabs on f32.
2761  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2762  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2763 
2764  // Note: in little-endian, the floating-point value is stored in the
2765  // registers are in the opposite order, so the subreg with the sign
2766  // bit is the highest-numbered (odd), rather than the
2767  // lowest-numbered (even).
2768 
2769  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2770  SrcReg64);
2771  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2772  SrcReg64);
2773 
2774  if (DAG.getDataLayout().isLittleEndian())
2775  Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2776  else
2777  Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2778 
2779  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2780  dl, MVT::f64), 0);
2781  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2782  DstReg64, Hi32);
2783  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2784  DstReg64, Lo32);
2785  return DstReg64;
2786 }
2787 
2788 // Lower a f128 load into two f64 loads.
2790 {
2791  SDLoc dl(Op);
2792  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2793  assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2794 
2795  Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);
2796 
2797  SDValue Hi64 =
2798  DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2799  LdNode->getPointerInfo(), Alignment);
2800  EVT addrVT = LdNode->getBasePtr().getValueType();
2801  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2802  LdNode->getBasePtr(),
2803  DAG.getConstant(8, dl, addrVT));
2804  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2805  LdNode->getPointerInfo().getWithOffset(8),
2806  Alignment);
2807 
2808  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2809  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2810 
2811  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2812  dl, MVT::f128);
2813  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2814  MVT::f128,
2815  SDValue(InFP128, 0),
2816  Hi64,
2817  SubRegEven);
2818  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2819  MVT::f128,
2820  SDValue(InFP128, 0),
2821  Lo64,
2822  SubRegOdd);
2823  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2824  SDValue(Lo64.getNode(), 1) };
2825  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2826  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2827  return DAG.getMergeValues(Ops, dl);
2828 }
2829 
2831 {
2832  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2833 
2834  EVT MemVT = LdNode->getMemoryVT();
2835  if (MemVT == MVT::f128)
2836  return LowerF128Load(Op, DAG);
2837 
2838  return Op;
2839 }
2840 
2841 // Lower a f128 store into two f64 stores.
2843  SDLoc dl(Op);
2844  StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
2845  assert(StNode->getOffset().isUndef() && "Unexpected node type");
2846 
2847  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2848  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2849 
2850  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2851  dl,
2852  MVT::f64,
2853  StNode->getValue(),
2854  SubRegEven);
2855  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2856  dl,
2857  MVT::f64,
2858  StNode->getValue(),
2859  SubRegOdd);
2860 
2861  Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);
2862 
2863  SDValue OutChains[2];
2864  OutChains[0] =
2865  DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2866  StNode->getBasePtr(), StNode->getPointerInfo(),
2867  Alignment);
2868  EVT addrVT = StNode->getBasePtr().getValueType();
2869  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2870  StNode->getBasePtr(),
2871  DAG.getConstant(8, dl, addrVT));
2872  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
2873  StNode->getPointerInfo().getWithOffset(8),
2874  Alignment);
2875  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2876 }
2877 
2879 {
2880  SDLoc dl(Op);
2881  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
2882 
2883  EVT MemVT = St->getMemoryVT();
2884  if (MemVT == MVT::f128)
2885  return LowerF128Store(Op, DAG);
2886 
2887  if (MemVT == MVT::i64) {
2888  // Custom handling for i64 stores: turn it into a bitcast and a
2889  // v2i32 store.
2890  SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
2891  SDValue Chain = DAG.getStore(
2892  St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
2893  St->getOriginalAlign(), St->getMemOperand()->getFlags(),
2894  St->getAAInfo());
2895  return Chain;
2896  }
2897 
2898  return SDValue();
2899 }
2900 
2901 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2902  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
2903  && "invalid opcode");
2904 
2905  SDLoc dl(Op);
2906 
2907  if (Op.getValueType() == MVT::f64)
2908  return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
2909  if (Op.getValueType() != MVT::f128)
2910  return Op;
2911 
2912  // Lower fabs/fneg on f128 to fabs/fneg on f64
2913  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
2914  // (As with LowerF64Op, on little-endian, we need to negate the odd
2915  // subreg)
2916 
2917  SDValue SrcReg128 = Op.getOperand(0);
2918  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2919  SrcReg128);
2920  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
2921  SrcReg128);
2922 
2923  if (DAG.getDataLayout().isLittleEndian()) {
2924  if (isV9)
2925  Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
2926  else
2927  Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
2928  } else {
2929  if (isV9)
2930  Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
2931  else
2932  Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
2933  }
2934 
2935  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2936  dl, MVT::f128), 0);
2937  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
2938  DstReg128, Hi64);
2939  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
2940  DstReg128, Lo64);
2941  return DstReg128;
2942 }
2943 
2945 
2946  if (Op.getValueType() != MVT::i64)
2947  return Op;
2948 
2949  SDLoc dl(Op);
2950  SDValue Src1 = Op.getOperand(0);
2951  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
2952  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
2953  DAG.getConstant(32, dl, MVT::i64));
2954  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
2955 
2956  SDValue Src2 = Op.getOperand(1);
2957  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
2958  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
2959  DAG.getConstant(32, dl, MVT::i64));
2960  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
2961 
2962 
2963  bool hasChain = false;
2964  unsigned hiOpc = Op.getOpcode();
2965  switch (Op.getOpcode()) {
2966  default: llvm_unreachable("Invalid opcode");
2967  case ISD::ADDC: hiOpc = ISD::ADDE; break;
2968  case ISD::ADDE: hasChain = true; break;
2969  case ISD::SUBC: hiOpc = ISD::SUBE; break;
2970  case ISD::SUBE: hasChain = true; break;
2971  }
2972  SDValue Lo;
2973  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
2974  if (hasChain) {
2975  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
2976  Op.getOperand(2));
2977  } else {
2978  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
2979  }
2980  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
2981  SDValue Carry = Hi.getValue(1);
2982 
2983  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
2984  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
2985  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
2986  DAG.getConstant(32, dl, MVT::i64));
2987 
2988  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
2989  SDValue Ops[2] = { Dst, Carry };
2990  return DAG.getMergeValues(Ops, dl);
2991 }
2992 
2993 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
2994 // in LegalizeDAG.cpp except the order of arguments to the library function.
2996  const SparcTargetLowering &TLI)
2997 {
2998  unsigned opcode = Op.getOpcode();
2999  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
3000 
3001  bool isSigned = (opcode == ISD::SMULO);
3002  EVT VT = MVT::i64;
3003  EVT WideVT = MVT::i128;
3004  SDLoc dl(Op);
3005  SDValue LHS = Op.getOperand(0);
3006 
3007  if (LHS.getValueType() != VT)
3008  return Op;
3009 
3010  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
3011 
3012  SDValue RHS = Op.getOperand(1);
3013  SDValue HiLHS, HiRHS;
3014  if (isSigned) {
3015  HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
3016  HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
3017  } else {
3018  HiLHS = DAG.getConstant(0, dl, VT);
3019  HiRHS = DAG.getConstant(0, dl, MVT::i64);
3020  }
3021 
3022  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
3023 
3025  CallOptions.setSExt(isSigned);
3026  SDValue MulResult = TLI.makeLibCall(DAG,
3027  RTLIB::MUL_I128, WideVT,
3028  Args, CallOptions, dl).first;
3029  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3030  MulResult, DAG.getIntPtrConstant(0, dl));
3031  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3032  MulResult, DAG.getIntPtrConstant(1, dl));
3033  if (isSigned) {
3034  SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
3035  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
3036  } else {
3037  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
3038  ISD::SETNE);
3039  }
3040  // MulResult is a node with an illegal type. Because such things are not
3041  // generally permitted during this phase of legalization, ensure that
3042  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
3043  // been folded.
3044  assert(MulResult->use_empty() && "Illegally typed node still in use!");
3045 
3046  SDValue Ops[2] = { BottomHalf, TopHalf } ;
3047  return DAG.getMergeValues(Ops, dl);
3048 }
3049 
3051  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3052  // Expand with a fence.
3053  return SDValue();
3054  }
3055 
3056  // Monotonic load/stores are legal.
3057  return Op;
3058 }
3059 
3061  SelectionDAG &DAG) const {
3062  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3063  SDLoc dl(Op);
3064  switch (IntNo) {
3065  default: return SDValue(); // Don't custom lower most intrinsics.
3066  case Intrinsic::thread_pointer: {
3067  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3068  return DAG.getRegister(SP::G7, PtrVT);
3069  }
3070  }
3071 }
3072 
3075 
3076  bool hasHardQuad = Subtarget->hasHardQuad();
3077  bool isV9 = Subtarget->isV9();
3078 
3079  switch (Op.getOpcode()) {
3080  default: llvm_unreachable("Should not custom lower this!");
3081 
3082  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3083  Subtarget);
3084  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3085  Subtarget);
3086  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3087  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3088  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3089  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3090  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3091  hasHardQuad);
3092  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3093  hasHardQuad);
3094  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3095  hasHardQuad);
3096  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3097  hasHardQuad);
3098  case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
3099  hasHardQuad);
3100  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
3101  hasHardQuad);
3102  case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3103  case ISD::VAARG: return LowerVAARG(Op, DAG);
3105  Subtarget);
3106 
3107  case ISD::LOAD: return LowerLOAD(Op, DAG);
3108  case ISD::STORE: return LowerSTORE(Op, DAG);
3109  case ISD::FADD: return LowerF128Op(Op, DAG,
3110  getLibcallName(RTLIB::ADD_F128), 2);
3111  case ISD::FSUB: return LowerF128Op(Op, DAG,
3112  getLibcallName(RTLIB::SUB_F128), 2);
3113  case ISD::FMUL: return LowerF128Op(Op, DAG,
3114  getLibcallName(RTLIB::MUL_F128), 2);
3115  case ISD::FDIV: return LowerF128Op(Op, DAG,
3116  getLibcallName(RTLIB::DIV_F128), 2);
3117  case ISD::FSQRT: return LowerF128Op(Op, DAG,
3118  getLibcallName(RTLIB::SQRT_F128),1);
3119  case ISD::FABS:
3120  case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3121  case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3122  case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3123  case ISD::ADDC:
3124  case ISD::ADDE:
3125  case ISD::SUBC:
3126  case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
3127  case ISD::UMULO:
3128  case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3129  case ISD::ATOMIC_LOAD:
3130  case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3132  }
3133 }
3134 
3136  const SDLoc &DL,
3137  SelectionDAG &DAG) const {
3138  APInt V = C->getValueAPF().bitcastToAPInt();
3139  SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3140  SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3141  if (DAG.getDataLayout().isLittleEndian())
3142  std::swap(Lo, Hi);
3143  return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3144 }
3145 
3147  DAGCombinerInfo &DCI) const {
3148  SDLoc dl(N);
3149  SDValue Src = N->getOperand(0);
3150 
3151  if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3152  Src.getSimpleValueType() == MVT::f64)
3153  return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3154 
3155  return SDValue();
3156 }
3157 
3159  DAGCombinerInfo &DCI) const {
3160  switch (N->getOpcode()) {
3161  default:
3162  break;
3163  case ISD::BITCAST:
3164  return PerformBITCASTCombine(N, DCI);
3165  }
3166  return SDValue();
3167 }
3168 
3171  MachineBasicBlock *BB) const {
3172  switch (MI.getOpcode()) {
3173  default: llvm_unreachable("Unknown SELECT_CC!");
3174  case SP::SELECT_CC_Int_ICC:
3175  case SP::SELECT_CC_FP_ICC:
3176  case SP::SELECT_CC_DFP_ICC:
3177  case SP::SELECT_CC_QFP_ICC:
3178  return expandSelectCC(MI, BB, SP::BCOND);
3179  case SP::SELECT_CC_Int_XCC:
3180  case SP::SELECT_CC_FP_XCC:
3181  case SP::SELECT_CC_DFP_XCC:
3182  case SP::SELECT_CC_QFP_XCC:
3183  return expandSelectCC(MI, BB, SP::BPXCC);
3184  case SP::SELECT_CC_Int_FCC:
3185  case SP::SELECT_CC_FP_FCC:
3186  case SP::SELECT_CC_DFP_FCC:
3187  case SP::SELECT_CC_QFP_FCC:
3188  return expandSelectCC(MI, BB, SP::FBCOND);
3189  }
3190 }
3191 
3194  unsigned BROpcode) const {
3195  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3196  DebugLoc dl = MI.getDebugLoc();
3197  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3198 
3199  // To "insert" a SELECT_CC instruction, we actually have to insert the
3200  // triangle control-flow pattern. The incoming instruction knows the
3201  // destination vreg to set, the condition code register to branch on, the
3202  // true/false values to select between, and the condition code for the branch.
3203  //
3204  // We produce the following control flow:
3205  // ThisMBB
3206  // | \
3207  // | IfFalseMBB
3208  // | /
3209  // SinkMBB
3210  const BasicBlock *LLVM_BB = BB->getBasicBlock();
3211  MachineFunction::iterator It = ++BB->getIterator();
3212 
3213  MachineBasicBlock *ThisMBB = BB;
3214  MachineFunction *F = BB->getParent();
3215  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3216  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3217  F->insert(It, IfFalseMBB);
3218  F->insert(It, SinkMBB);
3219 
3220  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3221  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3222  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3223  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3224 
3225  // Set the new successors for ThisMBB.
3226  ThisMBB->addSuccessor(IfFalseMBB);
3227  ThisMBB->addSuccessor(SinkMBB);
3228 
3229  BuildMI(ThisMBB, dl, TII.get(BROpcode))
3230  .addMBB(SinkMBB)
3231  .addImm(CC);
3232 
3233  // IfFalseMBB just falls through to SinkMBB.
3234  IfFalseMBB->addSuccessor(SinkMBB);
3235 
3236  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3237  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3238  MI.getOperand(0).getReg())
3239  .addReg(MI.getOperand(1).getReg())
3240  .addMBB(ThisMBB)
3241  .addReg(MI.getOperand(2).getReg())
3242  .addMBB(IfFalseMBB);
3243 
3244  MI.eraseFromParent(); // The pseudo instruction is gone now.
3245  return SinkMBB;
3246 }
3247 
3248 //===----------------------------------------------------------------------===//
3249 // Sparc Inline Assembly Support
3250 //===----------------------------------------------------------------------===//
3251 
3252 /// getConstraintType - Given a constraint letter, return the type of
3253 /// constraint it is for this target.
3256  if (Constraint.size() == 1) {
3257  switch (Constraint[0]) {
3258  default: break;
3259  case 'r':
3260  case 'f':
3261  case 'e':
3262  return C_RegisterClass;
3263  case 'I': // SIMM13
3264  return C_Immediate;
3265  }
3266  }
3267 
3268  return TargetLowering::getConstraintType(Constraint);
3269 }
3270 
3273  const char *constraint) const {
3274  ConstraintWeight weight = CW_Invalid;
3275  Value *CallOperandVal = info.CallOperandVal;
3276  // If we don't have a value, we can't do a match,
3277  // but allow it at the lowest weight.
3278  if (!CallOperandVal)
3279  return CW_Default;
3280 
3281  // Look at the constraint type.
3282  switch (*constraint) {
3283  default:
3285  break;
3286  case 'I': // SIMM13
3287  if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3288  if (isInt<13>(C->getSExtValue()))
3289  weight = CW_Constant;
3290  }
3291  break;
3292  }
3293  return weight;
3294 }
3295 
3296 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3297 /// vector. If it is invalid, don't add anything to Ops.
3300  std::string &Constraint,
3301  std::vector<SDValue> &Ops,
3302  SelectionDAG &DAG) const {
3303  SDValue Result;
3304 
3305  // Only support length 1 constraints for now.
3306  if (Constraint.length() > 1)
3307  return;
3308 
3309  char ConstraintLetter = Constraint[0];
3310  switch (ConstraintLetter) {
3311  default: break;
3312  case 'I':
3313  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3314  if (isInt<13>(C->getSExtValue())) {
3315  Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3316  Op.getValueType());
3317  break;
3318  }
3319  return;
3320  }
3321  }
3322 
3323  if (Result.getNode()) {
3324  Ops.push_back(Result);
3325  return;
3326  }
3327  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3328 }
3329 
3330 std::pair<unsigned, const TargetRegisterClass *>
3332  StringRef Constraint,
3333  MVT VT) const {
3334  if (Constraint.empty())
3335  return std::make_pair(0U, nullptr);
3336 
3337  if (Constraint.size() == 1) {
3338  switch (Constraint[0]) {
3339  case 'r':
3340  if (VT == MVT::v2i32)
3341  return std::make_pair(0U, &SP::IntPairRegClass);
3342  else if (Subtarget->is64Bit())
3343  return std::make_pair(0U, &SP::I64RegsRegClass);
3344  else
3345  return std::make_pair(0U, &SP::IntRegsRegClass);
3346  case 'f':
3347  if (VT == MVT::f32 || VT == MVT::i32)
3348  return std::make_pair(0U, &SP::FPRegsRegClass);
3349  else if (VT == MVT::f64 || VT == MVT::i64)
3350  return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3351  else if (VT == MVT::f128)
3352  return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3353  // This will generate an error message
3354  return std::make_pair(0U, nullptr);
3355  case 'e':
3356  if (VT == MVT::f32 || VT == MVT::i32)
3357  return std::make_pair(0U, &SP::FPRegsRegClass);
3358  else if (VT == MVT::f64 || VT == MVT::i64 )
3359  return std::make_pair(0U, &SP::DFPRegsRegClass);
3360  else if (VT == MVT::f128)
3361  return std::make_pair(0U, &SP::QFPRegsRegClass);
3362  // This will generate an error message
3363  return std::make_pair(0U, nullptr);
3364  }
3365  }
3366 
3367  if (Constraint.front() != '{')
3368  return std::make_pair(0U, nullptr);
3369 
3370  assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3371  StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3372  if (RegName.empty())
3373  return std::make_pair(0U, nullptr);
3374 
3375  unsigned long long RegNo;
3376  // Handle numbered register aliases.
3377  if (RegName[0] == 'r' &&
3378  getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3379  // r0-r7 -> g0-g7
3380  // r8-r15 -> o0-o7
3381  // r16-r23 -> l0-l7
3382  // r24-r31 -> i0-i7
3383  if (RegNo > 31)
3384  return std::make_pair(0U, nullptr);
3385  const char RegTypes[] = {'g', 'o', 'l', 'i'};
3386  char RegType = RegTypes[RegNo / 8];
3387  char RegIndex = '0' + (RegNo % 8);
3388  char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3389  return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3390  }
3391 
3392  // Rewrite the fN constraint according to the value type if needed.
3393  if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3394  getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3395  if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3397  TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3398  } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3400  TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3401  } else {
3402  return std::make_pair(0U, nullptr);
3403  }
3404  }
3405 
3406  auto ResultPair =
3408  if (!ResultPair.second)
3409  return std::make_pair(0U, nullptr);
3410 
3411  // Force the use of I64Regs over IntRegs for 64-bit values.
3412  if (Subtarget->is64Bit() && VT == MVT::i64) {
3413  assert(ResultPair.second == &SP::IntRegsRegClass &&
3414  "Unexpected register class");
3415  return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3416  }
3417 
3418  return ResultPair;
3419 }
3420 
3421 bool
3423  // The Sparc target isn't yet aware of offsets.
3424  return false;
3425 }
3426 
3429  SelectionDAG &DAG) const {
3430 
3431  SDLoc dl(N);
3432 
3433  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3434 
3435  switch (N->getOpcode()) {
3436  default:
3437  llvm_unreachable("Do not know how to custom type legalize this operation!");
3438 
3439  case ISD::FP_TO_SINT:
3440  case ISD::FP_TO_UINT:
3441  // Custom lower only if it involves f128 or i64.
3442  if (N->getOperand(0).getValueType() != MVT::f128
3443  || N->getValueType(0) != MVT::i64)
3444  return;
3445  libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3446  ? RTLIB::FPTOSINT_F128_I64
3447  : RTLIB::FPTOUINT_F128_I64);
3448 
3449  Results.push_back(LowerF128Op(SDValue(N, 0),
3450  DAG,
3451  getLibcallName(libCall),
3452  1));
3453  return;
3454  case ISD::READCYCLECOUNTER: {
3455  assert(Subtarget->hasLeonCycleCounter());
3456  SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3457  SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3458  SDValue Ops[] = { Lo, Hi };
3459  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3460  Results.push_back(Pair);
3461  Results.push_back(N->getOperand(0));
3462  return;
3463  }
3464  case ISD::SINT_TO_FP:
3465  case ISD::UINT_TO_FP:
3466  // Custom lower only if it involves f128 or i64.
3467  if (N->getValueType(0) != MVT::f128
3468  || N->getOperand(0).getValueType() != MVT::i64)
3469  return;
3470 
3471  libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3472  ? RTLIB::SINTTOFP_I64_F128
3473  : RTLIB::UINTTOFP_I64_F128);
3474 
3475  Results.push_back(LowerF128Op(SDValue(N, 0),
3476  DAG,
3477  getLibcallName(libCall),
3478  1));
3479  return;
3480  case ISD::LOAD: {
3481  LoadSDNode *Ld = cast<LoadSDNode>(N);
3482  // Custom handling only for i64: turn i64 load into a v2i32 load,
3483  // and a bitcast.
3484  if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3485  return;
3486 
3487  SDLoc dl(N);
3488  SDValue LoadRes = DAG.getExtLoad(
3489  Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3490  Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
3491  Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
3492  Ld->getAAInfo());
3493 
3494  SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3495  Results.push_back(Res);
3496  Results.push_back(LoadRes.getValue(1));
3497  return;
3498  }
3499  }
3500 }
3501 
3502 // Override to enable LOAD_STACK_GUARD lowering on Linux.
3504  if (!Subtarget->isTargetLinux())
3506  return true;
3507 }
3508 
3509 // Override to disable global variable loading on Linux.
3511  if (!Subtarget->isTargetLinux())
3513 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::StringSwitch::Case
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
llvm::CCValAssign::getLocVT
MVT getLocVT() const
Definition: CallingConvLower.h:151
llvm::MachineRegisterInfo::addLiveIn
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Definition: MachineRegisterInfo.h:954
llvm::SparcMCExpr::VK_Sparc_TLS_IE_LO10
@ VK_Sparc_TLS_IE_LO10
Definition: SparcMCExpr.h:56
i
i
Definition: README.txt:29
llvm::ISD::SETUGE
@ SETUGE
Definition: ISDOpcodes.h:1424
llvm::SparcRegisterInfo
Definition: SparcRegisterInfo.h:22
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:156
L5
to esp esp setne al movzbw ax esp setg cl movzbw cx cmove cx cl jne LBB1_2 esp which is much esp edx eax decl edx jle L7 L5
Definition: README.txt:656
llvm::LoadSDNode::getOffset
const SDValue & getOffset() const
Definition: SelectionDAGNodes.h:2334
llvm::SPISD::GLOBAL_BASE_REG
@ GLOBAL_BASE_REG
Definition: SparcISelLowering.h:44
llvm::CCValAssign::ZExt
@ ZExt
Definition: CallingConvLower.h:36
llvm::StringRef::back
LLVM_NODISCARD char back() const
back - Get the last character in the string.
Definition: StringRef.h:168
llvm::SPISD::TLS_ADD
@ TLS_ADD
Definition: SparcISelLowering.h:49
toCallerWindow
static unsigned toCallerWindow(unsigned Reg)
Definition: SparcISelLowering.cpp:186
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
llvm::ConstantSDNode
Definition: SelectionDAGNodes.h:1564
llvm::RegisterSDNode
Definition: SelectionDAGNodes.h:2131
LowerATOMIC_LOAD_STORE
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
Definition: SparcISelLowering.cpp:3050
llvm::StoreSDNode::getBasePtr
const SDValue & getBasePtr() const
Definition: SelectionDAGNodes.h:2364
llvm::RISCVAttrs::StackAlign
StackAlign
Definition: RISCVAttributes.h:37
LowerVASTART
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
Definition: SparcISelLowering.cpp:2562
SparcRegisterInfo.h
llvm::SPCC::ICC_CS
@ ICC_CS
Definition: Sparc.h:52
llvm::SparcMCExpr::VK_Sparc_TLS_IE_ADD
@ VK_Sparc_TLS_IE_ADD
Definition: SparcMCExpr.h:59
llvm::SelectionDAG::getCALLSEQ_START
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
Definition: SelectionDAG.h:943
llvm::ISD::SETLE
@ SETLE
Definition: ISDOpcodes.h:1435
llvm::SPISD::ITOF
@ ITOF
Definition: SparcISelLowering.h:38
llvm::ISD::SETO
@ SETO
Definition: ISDOpcodes.h:1420
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:104
llvm::ISD::UMULO
@ UMULO
Definition: ISDOpcodes.h:332
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::ISD::ArgFlagsTy::isSRet
bool isSRet() const
Definition: TargetCallingConv.h:82
llvm::SparcMCExpr::VK_Sparc_L44
@ VK_Sparc_L44
Definition: SparcMCExpr.h:31
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::SDNode::getValueType
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Definition: SelectionDAGNodes.h:970
llvm::PICLevel::SmallPIC
@ SmallPIC
Definition: CodeGen.h:33
llvm::SDLoc
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Definition: SelectionDAGNodes.h:1090
llvm::SPCC::FCC_G
@ FCC_G
Definition: Sparc.h:61
llvm::CCValAssign::Full
@ Full
Definition: CallingConvLower.h:34
llvm::TargetLoweringBase::Legal
@ Legal
Definition: TargetLowering.h:196
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::ISD::OR
@ OR
Definition: ISDOpcodes.h:667
llvm::SparcTargetLowering::LowerConstantPool
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:2065
llvm::TargetMachine::useEmulatedTLS
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
Definition: TargetMachine.cpp:150
llvm::ISD::SETGT
@ SETGT
Definition: ISDOpcodes.h:1432
llvm::ISD::BITCAST
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:886
llvm::ISD::SETNE
@ SETNE
Definition: ISDOpcodes.h:1436
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:156
llvm::TargetLowering::getSingleConstraintMatchWeight
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Definition: TargetLowering.cpp:5391
llvm::MachineFrameInfo::setReturnAddressIsTaken
void setReturnAddressIsTaken(bool s)
Definition: MachineFrameInfo.h:377
llvm::TargetLowering::ConstraintType
ConstraintType
Definition: TargetLowering.h:4419
llvm::ISD::BR_JT
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:991
llvm::KnownBits::resetAll
void resetAll()
Resets the known state of all bits.
Definition: KnownBits.h:66
llvm::SparcTargetLowering::isOffsetFoldingLegal
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
Definition: SparcISelLowering.cpp:3422
llvm::TargetLowering::CallLoweringInfo::setChain
CallLoweringInfo & setChain(SDValue InChain)
Definition: TargetLowering.h:4053
llvm::SparcMCExpr::VK_Sparc_HM
@ VK_Sparc_HM
Definition: SparcMCExpr.h:33
llvm::CCState
CCState - This class holds information needed while lowering arguments and return values.
Definition: CallingConvLower.h:189
llvm::ISD::AssertSext
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:61
llvm::SelectionDAG::getCopyToReg
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:750
llvm::SPCC::FCC_LE
@ FCC_LE
Definition: Sparc.h:71
llvm::EVT::getFixedSizeInBits
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition: ValueTypes.h:348
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
llvm::SDValue::getNode
SDNode * getNode() const
get the SDNode which holds the desired result
Definition: SelectionDAGNodes.h:151
llvm::isOneConstant
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Definition: SelectionDAG.cpp:10523
llvm::Function
Definition: Function.h:60
llvm::SPCC::ICC_LE
@ ICC_LE
Definition: Sparc.h:46
llvm::ISD::BSWAP
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:700
llvm::StringSwitch::Default
LLVM_NODISCARD R Default(T Value)
Definition: StringSwitch.h:183
llvm::ISD::UDIV
@ UDIV
Definition: ISDOpcodes.h:243
is64Bit
static bool is64Bit(const char *name)
Definition: X86Disassembler.cpp:1018
llvm::MVT::i128
@ i128
Definition: MachineValueType.h:50
llvm::ISD::DYNAMIC_STACKALLOC
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:976
llvm::SelectionDAG::getValueType
SDValue getValueType(EVT)
Definition: SelectionDAG.cpp:1798
llvm::CCState::addLoc
void addLoc(const CCValAssign &V)
Definition: CallingConvLower.h:251
IntCondCCodeToICC
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC)
IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC condition.
Definition: SparcISelLowering.cpp:1410
CC_Sparc64_Full
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Definition: SparcISelLowering.cpp:104
llvm::SparcSubtarget::isV9
bool isV9() const
Definition: SparcSubtarget.h:81
llvm::ISD::ADDC
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:269
llvm::SPCC::FCC_UE
@ FCC_UE
Definition: Sparc.h:68
llvm::TLSModel::GeneralDynamic
@ GeneralDynamic
Definition: CodeGen.h:43
llvm::CodeModel::Medium
@ Medium
Definition: CodeGen.h:28
llvm::AtomicRMWInst::getOperation
BinOp getOperation() const
Definition: Instructions.h:792
llvm::TargetLoweringBase::setMinCmpXchgSizeInBits
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
Definition: TargetLowering.h:2479
llvm::SelectionDAG::getFrameIndex
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
Definition: SelectionDAG.cpp:1679
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::ISD::SETEQ
@ SETEQ
Definition: ISDOpcodes.h:1431
llvm::SPCC::ICC_L
@ ICC_L
Definition: Sparc.h:48
llvm::ISD::STACKRESTORE
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1057
llvm::SelectionDAG::getVTList
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
Definition: SelectionDAG.cpp:9108
llvm::SparcMCExpr::VK_Sparc_TLS_LE_LOX10
@ VK_Sparc_TLS_LE_LOX10
Definition: SparcMCExpr.h:61
llvm::SparcMachineFunctionInfo::setSRetReturnReg
void setSRetReturnReg(Register Reg)
Definition: SparcMachineFunctionInfo.h:53
ErrorHandling.h
llvm::SelectionDAG::getRoot
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
Definition: SelectionDAG.h:528
llvm::X86Disassembler::Reg
Reg
All possible values of the reg field in the ModR/M byte.
Definition: X86DisassemblerDecoder.h:462
llvm::SparcTargetLowering::getRegisterByName
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
Definition: SparcISelLowering.cpp:1064
llvm::SparcMCExpr::VK_Sparc_TLS_LDO_ADD
@ VK_Sparc_TLS_LDO_ADD
Definition: SparcMCExpr.h:54
llvm::APInt::zextOrTrunc
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:994
llvm::SparcTargetLowering::useLoadStackGuardNode
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
Definition: SparcISelLowering.cpp:3503
llvm::MemSDNode::getMemoryVT
EVT getMemoryVT() const
Return the type of the in-memory value.
Definition: SelectionDAGNodes.h:1341
llvm::MemSDNode::getChain
const SDValue & getChain() const
Definition: SelectionDAGNodes.h:1364
llvm::ISD::ANY_EXTEND
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:766
llvm::SDNode
Represents one node in the SelectionDAG.
Definition: SelectionDAGNodes.h:454
llvm::TargetLoweringBase::getLibcallName
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
Definition: TargetLowering.h:3080
llvm::TargetLowering::CW_Constant
@ CW_Constant
Definition: TargetLowering.h:4441
llvm::ISD::FMA
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:482
llvm::ISD::FP_TO_SINT
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:819
llvm::TargetLowering::DAGCombinerInfo::DAG
SelectionDAG & DAG
Definition: TargetLowering.h:3812
llvm::SparcTargetLowering::LowerINTRINSIC_WO_CHAIN
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:3060
llvm::LoadSDNode
This class is used to represent ISD::LOAD nodes.
Definition: SelectionDAGNodes.h:2314
llvm::TargetLowering::CallLoweringInfo::setCallee
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
Definition: TargetLowering.h:4072
llvm::MVT::Glue
@ Glue
Definition: MachineValueType.h:270
llvm::SparcSubtarget::getRegisterInfo
const SparcRegisterInfo * getRegisterInfo() const override
Definition: SparcSubtarget.h:68
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:234
llvm::Depth
@ Depth
Definition: SIMachineScheduler.h:36
llvm::TargetLowering::isPositionIndependent
bool isPositionIndependent() const
Definition: TargetLowering.cpp:45
llvm::SPCC::FCC_UG
@ FCC_UG
Definition: Sparc.h:62
llvm::ISD::SETULE
@ SETULE
Definition: ISDOpcodes.h:1426
llvm::CCState::AnalyzeFormalArguments
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
Definition: CallingConvLower.cpp:82
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::RTLIB::Libcall
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Definition: RuntimeLibcalls.h:30
llvm::SparcTargetLowering::LowerF128_LibCallArg
SDValue LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, SDValue Arg, const SDLoc &DL, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:2180
Module.h
llvm::SPCC::FCC_O
@ FCC_O
Definition: Sparc.h:73
llvm::ISD::SHL_PARTS
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:749
llvm::SelectionDAG::getStore
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
Definition: SelectionDAG.cpp:7804
llvm::SPCC::ICC_LEU
@ ICC_LEU
Definition: Sparc.h:50
llvm::ISD::SETCC
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:736
llvm::SparcTargetLowering::LowerReturn_32
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:206
llvm::SPISD::BRXCC
@ BRXCC
Definition: SparcISelLowering.h:29
llvm::SparcTargetLowering
Definition: SparcISelLowering.h:57
llvm::SparcSubtarget::getFrameLowering
const TargetFrameLowering * getFrameLowering() const override
Definition: SparcSubtarget.h:65
llvm::TargetLowering::CallLoweringInfo::CB
const CallBase * CB
Definition: TargetLowering.h:4036
llvm::CCValAssign::BCvt
@ BCvt
Definition: CallingConvLower.h:44
llvm::TargetLoweringBase::setMinFunctionAlignment
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
Definition: TargetLowering.h:2447
llvm::TargetLowering::LowerCallTo
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
Definition: SelectionDAGBuilder.cpp:9697
LowerF128_FPEXTEND
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
Definition: SparcISelLowering.cpp:2359
llvm::SparcMCExpr::VK_Sparc_TLS_LDM_ADD
@ VK_Sparc_TLS_LDM_ADD
Definition: SparcMCExpr.h:50
llvm::ore::NV
DiagnosticInfoOptimizationBase::Argument NV
Definition: OptimizationRemarkEmitter.h:136
llvm::tgtok::FalseVal
@ FalseVal
Definition: TGLexer.h:62
Results
Function Alias Analysis Results
Definition: AliasAnalysis.cpp:848
llvm::TargetLoweringBase::getVectorIdxTy
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
Definition: TargetLowering.h:408
fixupVariableFloatArgs
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
Definition: SparcISelLowering.cpp:1091
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
llvm::MipsISD::Ret
@ Ret
Definition: MipsISelLowering.h:119
llvm::ISD::VAEND
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1086
llvm::ISD::EXTLOAD
@ EXTLOAD
Definition: ISDOpcodes.h:1391
llvm::APInt::lshr
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:832
RHS
Value * RHS
Definition: X86PartialReduction.cpp:76
llvm::SPCC::CondCodes
CondCodes
Definition: Sparc.h:40
llvm::ISD::SETOEQ
@ SETOEQ
Definition: ISDOpcodes.h:1414
llvm::BlockAddressSDNode
Definition: SelectionDAGNodes.h:2165
llvm::SparcMCExpr::VK_Sparc_TLS_LDM_HI22
@ VK_Sparc_TLS_LDM_HI22
Definition: SparcMCExpr.h:48
SelectionDAG.h
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1628
SparcISelLowering.h
llvm::SparcTargetLowering::EmitInstrWithCustomInserter
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
Definition: SparcISelLowering.cpp:3170
llvm::SPISD::FIRST_NUMBER
@ FIRST_NUMBER
Definition: SparcISelLowering.h:25
llvm::Type::getInt32Ty
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:239
llvm::ISD::SETUEQ
@ SETUEQ
Definition: ISDOpcodes.h:1422
llvm::CCState::AnalyzeCallOperands
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
Definition: CallingConvLower.cpp:123
llvm::SelectionDAG::getContext
LLVMContext * getContext() const
Definition: SelectionDAG.h:462
llvm::ISD::FABS
@ FABS
Definition: ISDOpcodes.h:912
llvm::SparcMCExpr::VK_Sparc_M44
@ VK_Sparc_M44
Definition: SparcMCExpr.h:30
I1
@ I1
Definition: DXILOpLowering.cpp:37
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::ISD::BRCOND
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1001
MachineRegisterInfo.h
KnownBits.h
getFRAMEADDR
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget, bool AlwaysFlush=false)
Definition: SparcISelLowering.cpp:2678
llvm::SPISD::BRFCC
@ BRFCC
Definition: SparcISelLowering.h:30
llvm::SelectionDAG::getRegister
SDValue getRegister(unsigned Reg, EVT VT)
Definition: SelectionDAG.cpp:2061
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:55
llvm::SparcMCExpr::VK_Sparc_TLS_GD_CALL
@ VK_Sparc_TLS_GD_CALL
Definition: SparcMCExpr.h:47
SparcTargetObjectFile.h
LookThroughSetCC
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
Definition: SparcISelLowering.cpp:1943
CC_Sparc_Assign_SRet
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Definition: SparcISelLowering.cpp:41
llvm::ISD::BRIND
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:987
llvm::ISD::ROTL
@ ROTL
Definition: ISDOpcodes.h:694
llvm::SPCC::FCC_ULE
@ FCC_ULE
Definition: Sparc.h:72
llvm::TargetLoweringBase::setTargetDAGCombine
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Definition: TargetLowering.h:2439
llvm::SPISD::FLUSHW
@ FLUSHW
Definition: SparcISelLowering.h:45
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:186
llvm::MVT::integer_valuetypes
static auto integer_valuetypes()
Definition: MachineValueType.h:1461
llvm::MachineBasicBlock::addSuccessor
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
Definition: MachineBasicBlock.cpp:747
llvm::CallInfo
Definition: GVNHoist.cpp:217
CC_Sparc_Assign_Split_64
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Definition: SparcISelLowering.cpp:54
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::SPCC::FCC_LG
@ FCC_LG
Definition: Sparc.h:65
llvm::SPII::Load
@ Load
Definition: SparcInstrInfo.h:32
LHS
Value * LHS
Definition: X86PartialReduction.cpp:75
llvm::ISD::BR_CC
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1008
LowerF128_FPROUND
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
Definition: SparcISelLowering.cpp:2375
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
llvm::SelectionDAG::getLoad
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
Definition: SelectionDAG.cpp:7754
llvm::SparcMCExpr::VK_Sparc_TLS_LE_HIX22
@ VK_Sparc_TLS_LE_HIX22
Definition: SparcMCExpr.h:60
llvm::MVT::i1
@ i1
Definition: MachineValueType.h:43
llvm::SDNode::getOpcode
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
Definition: SelectionDAGNodes.h:632
llvm::TargetLowering::CallLoweringInfo::IsVarArg
bool IsVarArg
Definition: TargetLowering.h:4014
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:666
LowerFNEGorFABS
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
Definition: SparcISelLowering.cpp:2901
llvm::CCValAssign::AExt
@ AExt
Definition: CallingConvLower.h:37
llvm::ISD::GlobalAddress
@ GlobalAddress
Definition: ISDOpcodes.h:78
llvm::ISD::SELECT_CC
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:728
llvm::ExternalSymbolSDNode
Definition: SelectionDAGNodes.h:2207
llvm::TargetInstrInfo
TargetInstrInfo - Interface to description of machine instruction set.
Definition: TargetInstrInfo.h:97
LowerUMULO_SMULO
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
Definition: SparcISelLowering.cpp:2995
llvm::CCValAssign::isExtInLoc
bool isExtInLoc() const
Definition: CallingConvLower.h:154
llvm::SDValue::getValueType
EVT getValueType() const
Return the ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:1125
llvm::SparcMCExpr::VK_Sparc_TLS_LDM_CALL
@ VK_Sparc_TLS_LDM_CALL
Definition: SparcMCExpr.h:51
llvm::CCValAssign
CCValAssign - Represent assignment of one arg/retval to a location.
Definition: CallingConvLower.h:31
llvm::ISD::CTLZ
@ CTLZ
Definition: ISDOpcodes.h:702
llvm::SPCC::ICC_CC
@ ICC_CC
Definition: Sparc.h:51
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:146
L2
add sub stmia L5 ldr L2
Definition: README.txt:201
llvm::SelectionDAG
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:220
llvm::ISD::SELECT
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:713
llvm::SPISD::Hi
@ Hi
Definition: SparcISelLowering.h:35
llvm::ISD::ZERO_EXTEND
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:763
llvm::SparcTargetLowering::LowerReturn_64
SDValue LowerReturn_64(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:291
llvm::ISD::ArgFlagsTy::isByVal
bool isByVal() const
Definition: TargetCallingConv.h:85
llvm::SparcTargetLowering::LowerFormalArguments_64
SDValue LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
Definition: SparcISelLowering.cpp:576
llvm::SelectionDAG::getTargetBlockAddress
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:745
llvm::ISD::SETGE
@ SETGE
Definition: ISDOpcodes.h:1433
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::CCValAssign::getLocReg
Register getLocReg() const
Definition: CallingConvLower.h:148
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:754
L3
AMD64 Optimization Manual has some nice information about optimizing integer multiplication by a constant How much of it applies to Intel s X86 implementation There are definite trade offs to xmm0 cvttss2siq rdx jb L3 subss xmm0 rax cvttss2siq rdx xorq rdx L3
Definition: README-X86-64.txt:22
llvm::ISD::SIGN_EXTEND_INREG
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:781
llvm::TargetFrameLowering::getStackAlign
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Definition: TargetFrameLowering.h:100
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:34
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::SparcMCExpr::VK_Sparc_H44
@ VK_Sparc_H44
Definition: SparcMCExpr.h:29
LowerRETURNADDR
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
Definition: SparcISelLowering.cpp:2720
llvm::MVT::f64
@ f64
Definition: MachineValueType.h:58
llvm::SelectionDAG::getConstant
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
Definition: SelectionDAG.cpp:1449
llvm::TargetLowering
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Definition: TargetLowering.h:3412
llvm::getAsUnsignedInteger
bool getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result)
Helper functions for StringRef::getAsInteger.
Definition: StringRef.cpp:492
llvm::SparcTargetLowering::makeAddress
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:2001
llvm::TargetLowering::DAGCombinerInfo
Definition: TargetLowering.h:3806
llvm::CCState::AnalyzeReturn
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
Definition: CallingConvLower.cpp:110
SparcMCExpr.h
llvm::ISD::TRUNCATE
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:769
llvm::SparcTargetLowering::SparcTargetLowering
SparcTargetLowering(const TargetMachine &TM, const SparcSubtarget &STI)
Definition: SparcISelLowering.cpp:1454
llvm::ISD::SRA
@ SRA
Definition: ISDOpcodes.h:692
llvm::SPCC::FCC_L
@ FCC_L
Definition: Sparc.h:63
llvm::SparcTargetLowering::bitcastConstantFPToInt
SDValue bitcastConstantFPToInt(ConstantFPSDNode *C, const SDLoc &DL, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:3135
llvm::TargetLowering::CallLoweringInfo::setDebugLoc
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
Definition: TargetLowering.h:4048
llvm::ISD::UDIVREM
@ UDIVREM
Definition: ISDOpcodes.h:256
llvm::MachinePointerInfo::getGOT
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
Definition: MachineOperand.cpp:1015
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:125
llvm::TargetLoweringBase::addRegisterClass
void addRegisterClass(MVT VT,