LLVM  16.0.0git
SparcISelLowering.cpp
Go to the documentation of this file.
1 //===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the interfaces that Sparc uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SparcISelLowering.h"
17 #include "SparcRegisterInfo.h"
18 #include "SparcTargetMachine.h"
19 #include "SparcTargetObjectFile.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringSwitch.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/Module.h"
33 #include "llvm/Support/KnownBits.h"
34 using namespace llvm;
35 
36 
37 //===----------------------------------------------------------------------===//
38 // Calling Convention Implementation
39 //===----------------------------------------------------------------------===//
40 
41 static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
42  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
43  ISD::ArgFlagsTy &ArgFlags, CCState &State)
44 {
45  assert (ArgFlags.isSRet());
46 
47  // Assign SRet argument.
48  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
49  0,
50  LocVT, LocInfo));
51  return true;
52 }
53 
54 static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
55  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
56  ISD::ArgFlagsTy &ArgFlags, CCState &State)
57 {
58  static const MCPhysReg RegList[] = {
59  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
60  };
61  // Try to get first reg.
62  if (Register Reg = State.AllocateReg(RegList)) {
63  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
64  } else {
65  // Assign whole thing in stack.
67  ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
68  return true;
69  }
70 
71  // Try to get second reg.
72  if (Register Reg = State.AllocateReg(RegList))
73  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
74  else
76  ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
77  return true;
78 }
79 
80 static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
81  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
82  ISD::ArgFlagsTy &ArgFlags, CCState &State)
83 {
84  static const MCPhysReg RegList[] = {
85  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
86  };
87 
88  // Try to get first reg.
89  if (Register Reg = State.AllocateReg(RegList))
90  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
91  else
92  return false;
93 
94  // Try to get second reg.
95  if (Register Reg = State.AllocateReg(RegList))
96  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
97  else
98  return false;
99 
100  return true;
101 }
102 
103 // Allocate a full-sized argument for the 64-bit ABI.
104 static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
105  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
106  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
107  assert((LocVT == MVT::f32 || LocVT == MVT::f128
108  || LocVT.getSizeInBits() == 64) &&
109  "Can't handle non-64 bits locations");
110 
111  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
112  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
113  Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
114  unsigned Offset = State.AllocateStack(size, alignment);
115  unsigned Reg = 0;
116 
117  if (LocVT == MVT::i64 && Offset < 6*8)
118  // Promote integers to %i0-%i5.
119  Reg = SP::I0 + Offset/8;
120  else if (LocVT == MVT::f64 && Offset < 16*8)
121  // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
122  Reg = SP::D0 + Offset/8;
123  else if (LocVT == MVT::f32 && Offset < 16*8)
124  // Promote floats to %f1, %f3, ...
125  Reg = SP::F1 + Offset/4;
126  else if (LocVT == MVT::f128 && Offset < 16*8)
127  // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
128  Reg = SP::Q0 + Offset/16;
129 
130  // Promote to register when possible, otherwise use the stack slot.
131  if (Reg) {
132  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
133  return true;
134  }
135 
136  // Bail out if this is a return CC and we run out of registers to place
137  // values into.
138  if (IsReturn)
139  return false;
140 
141  // This argument goes on the stack in an 8-byte slot.
142  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
143  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
144  if (LocVT == MVT::f32)
145  Offset += 4;
146 
147  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
148  return true;
149 }
150 
151 // Allocate a half-sized argument for the 64-bit ABI.
152 //
153 // This is used when passing { float, int } structs by value in registers.
154 static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
155  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
156  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
157  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
158  unsigned Offset = State.AllocateStack(4, Align(4));
159 
160  if (LocVT == MVT::f32 && Offset < 16*8) {
161  // Promote floats to %f0-%f31.
162  State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
163  LocVT, LocInfo));
164  return true;
165  }
166 
167  if (LocVT == MVT::i32 && Offset < 6*8) {
168  // Promote integers to %i0-%i5, using half the register.
169  unsigned Reg = SP::I0 + Offset/8;
170  LocVT = MVT::i64;
171  LocInfo = CCValAssign::AExt;
172 
173  // Set the Custom bit if this i32 goes in the high bits of a register.
174  if (Offset % 8 == 0)
175  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
176  LocVT, LocInfo));
177  else
178  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
179  return true;
180  }
181 
182  // Bail out if this is a return CC and we run out of registers to place
183  // values into.
184  if (IsReturn)
185  return false;
186 
187  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
188  return true;
189 }
190 
191 static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
192  CCValAssign::LocInfo &LocInfo,
193  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
194  return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
195  State);
196 }
197 
198 static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
199  CCValAssign::LocInfo &LocInfo,
200  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
201  return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
202  State);
203 }
204 
205 static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
206  CCValAssign::LocInfo &LocInfo,
207  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
208  return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
209  State);
210 }
211 
212 static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
213  CCValAssign::LocInfo &LocInfo,
214  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
215  return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
216  State);
217 }
218 
219 #include "SparcGenCallingConv.inc"
220 
221 // The calling conventions in SparcCallingConv.td are described in terms of the
222 // callee's register window. This function translates registers to the
223 // corresponding caller window %o register.
224 static unsigned toCallerWindow(unsigned Reg) {
225  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
226  "Unexpected enum");
227  if (Reg >= SP::I0 && Reg <= SP::I7)
228  return Reg - SP::I0 + SP::O0;
229  return Reg;
230 }
231 
233  CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
236  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
237  return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
238  : RetCC_Sparc32);
239 }
240 
241 SDValue
243  bool IsVarArg,
245  const SmallVectorImpl<SDValue> &OutVals,
246  const SDLoc &DL, SelectionDAG &DAG) const {
247  if (Subtarget->is64Bit())
248  return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
249  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
250 }
251 
252 SDValue
254  bool IsVarArg,
256  const SmallVectorImpl<SDValue> &OutVals,
257  const SDLoc &DL, SelectionDAG &DAG) const {
259 
260  // CCValAssign - represent the assignment of the return value to locations.
262 
263  // CCState - Info about the registers and stack slot.
264  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
265  *DAG.getContext());
266 
267  // Analyze return values.
268  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
269 
270  SDValue Flag;
271  SmallVector<SDValue, 4> RetOps(1, Chain);
272  // Make room for the return address offset.
273  RetOps.push_back(SDValue());
274 
275  // Copy the result values into the output registers.
276  for (unsigned i = 0, realRVLocIdx = 0;
277  i != RVLocs.size();
278  ++i, ++realRVLocIdx) {
279  CCValAssign &VA = RVLocs[i];
280  assert(VA.isRegLoc() && "Can only return in registers!");
281 
282  SDValue Arg = OutVals[realRVLocIdx];
283 
284  if (VA.needsCustom()) {
285  assert(VA.getLocVT() == MVT::v2i32);
286  // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
287  // happen by default if this wasn't a legal type)
288 
290  Arg,
291  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
293  Arg,
294  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
295 
296  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
297  Flag = Chain.getValue(1);
298  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
299  VA = RVLocs[++i]; // skip ahead to next loc
300  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
301  Flag);
302  } else
303  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
304 
305  // Guarantee that all emitted copies are stuck together with flags.
306  Flag = Chain.getValue(1);
307  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
308  }
309 
310  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
311  // If the function returns a struct, copy the SRetReturnReg to I0
312  if (MF.getFunction().hasStructRetAttr()) {
314  Register Reg = SFI->getSRetReturnReg();
315  if (!Reg)
316  llvm_unreachable("sret virtual register not created in the entry block");
317  auto PtrVT = getPointerTy(DAG.getDataLayout());
318  SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
319  Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
320  Flag = Chain.getValue(1);
321  RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
322  RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
323  }
324 
325  RetOps[0] = Chain; // Update chain.
326  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
327 
328  // Add the flag if we have it.
329  if (Flag.getNode())
330  RetOps.push_back(Flag);
331 
332  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
333 }
334 
335 // Lower return values for the 64-bit ABI.
336 // Return values are passed the exactly the same way as function arguments.
337 SDValue
339  bool IsVarArg,
341  const SmallVectorImpl<SDValue> &OutVals,
342  const SDLoc &DL, SelectionDAG &DAG) const {
343  // CCValAssign - represent the assignment of the return value to locations.
345 
346  // CCState - Info about the registers and stack slot.
347  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
348  *DAG.getContext());
349 
350  // Analyze return values.
351  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
352 
353  SDValue Flag;
354  SmallVector<SDValue, 4> RetOps(1, Chain);
355 
356  // The second operand on the return instruction is the return address offset.
357  // The return address is always %i7+8 with the 64-bit ABI.
358  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
359 
360  // Copy the result values into the output registers.
361  for (unsigned i = 0; i != RVLocs.size(); ++i) {
362  CCValAssign &VA = RVLocs[i];
363  assert(VA.isRegLoc() && "Can only return in registers!");
364  SDValue OutVal = OutVals[i];
365 
366  // Integer return values must be sign or zero extended by the callee.
367  switch (VA.getLocInfo()) {
368  case CCValAssign::Full: break;
369  case CCValAssign::SExt:
370  OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
371  break;
372  case CCValAssign::ZExt:
373  OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
374  break;
375  case CCValAssign::AExt:
376  OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
377  break;
378  default:
379  llvm_unreachable("Unknown loc info!");
380  }
381 
382  // The custom bit on an i32 return value indicates that it should be passed
383  // in the high bits of the register.
384  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
385  OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
386  DAG.getConstant(32, DL, MVT::i32));
387 
388  // The next value may go in the low bits of the same register.
389  // Handle both at once.
390  if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
391  SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
392  OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
393  // Skip the next value, it's already done.
394  ++i;
395  }
396  }
397 
398  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
399 
400  // Guarantee that all emitted copies are stuck together with flags.
401  Flag = Chain.getValue(1);
402  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
403  }
404 
405  RetOps[0] = Chain; // Update chain.
406 
407  // Add the flag if we have it.
408  if (Flag.getNode())
409  RetOps.push_back(Flag);
410 
411  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
412 }
413 
415  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
417  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
418  if (Subtarget->is64Bit())
419  return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
420  DL, DAG, InVals);
421  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
422  DL, DAG, InVals);
423 }
424 
425 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
426 /// passed in either one or two GPRs, including FP values. TODO: we should
427 /// pass FP values in FP registers for fastcc functions.
429  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
430  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
431  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
433  MachineRegisterInfo &RegInfo = MF.getRegInfo();
435 
436  // Assign locations to all of the incoming arguments.
438  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
439  *DAG.getContext());
440  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
441 
442  const unsigned StackOffset = 92;
443  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
444 
445  unsigned InIdx = 0;
446  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
447  CCValAssign &VA = ArgLocs[i];
448 
449  if (Ins[InIdx].Flags.isSRet()) {
450  if (InIdx != 0)
451  report_fatal_error("sparc only supports sret on the first parameter");
452  // Get SRet from [%fp+64].
453  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
454  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
455  SDValue Arg =
456  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
457  InVals.push_back(Arg);
458  continue;
459  }
460 
461  if (VA.isRegLoc()) {
462  if (VA.needsCustom()) {
463  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
464 
465  Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
466  MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
467  SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
468 
469  assert(i+1 < e);
470  CCValAssign &NextVA = ArgLocs[++i];
471 
472  SDValue LoVal;
473  if (NextVA.isMemLoc()) {
474  int FrameIdx = MF.getFrameInfo().
475  CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
476  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
477  LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
478  } else {
479  Register loReg = MF.addLiveIn(NextVA.getLocReg(),
480  &SP::IntRegsRegClass);
481  LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
482  }
483 
484  if (IsLittleEndian)
485  std::swap(LoVal, HiVal);
486 
487  SDValue WholeValue =
488  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
489  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
490  InVals.push_back(WholeValue);
491  continue;
492  }
493  Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
494  MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
495  SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
496  if (VA.getLocVT() == MVT::f32)
497  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
498  else if (VA.getLocVT() != MVT::i32) {
499  Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
500  DAG.getValueType(VA.getLocVT()));
501  Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
502  }
503  InVals.push_back(Arg);
504  continue;
505  }
506 
507  assert(VA.isMemLoc());
508 
509  unsigned Offset = VA.getLocMemOffset()+StackOffset;
510  auto PtrVT = getPointerTy(DAG.getDataLayout());
511 
512  if (VA.needsCustom()) {
513  assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
514  // If it is double-word aligned, just load.
515  if (Offset % 8 == 0) {
516  int FI = MF.getFrameInfo().CreateFixedObject(8,
517  Offset,
518  true);
519  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
520  SDValue Load =
521  DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
522  InVals.push_back(Load);
523  continue;
524  }
525 
526  int FI = MF.getFrameInfo().CreateFixedObject(4,
527  Offset,
528  true);
529  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
530  SDValue HiVal =
531  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
532  int FI2 = MF.getFrameInfo().CreateFixedObject(4,
533  Offset+4,
534  true);
535  SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
536 
537  SDValue LoVal =
538  DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
539 
540  if (IsLittleEndian)
541  std::swap(LoVal, HiVal);
542 
543  SDValue WholeValue =
544  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
545  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
546  InVals.push_back(WholeValue);
547  continue;
548  }
549 
550  int FI = MF.getFrameInfo().CreateFixedObject(4,
551  Offset,
552  true);
553  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
554  SDValue Load ;
555  if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
556  Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
557  } else if (VA.getValVT() == MVT::f128) {
558  report_fatal_error("SPARCv8 does not handle f128 in calls; "
559  "pass indirectly");
560  } else {
561  // We shouldn't see any other value types here.
562  llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
563  }
564  InVals.push_back(Load);
565  }
566 
567  if (MF.getFunction().hasStructRetAttr()) {
568  // Copy the SRet Argument to SRetReturnReg.
570  Register Reg = SFI->getSRetReturnReg();
571  if (!Reg) {
572  Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
573  SFI->setSRetReturnReg(Reg);
574  }
575  SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
576  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
577  }
578 
579  // Store remaining ArgRegs to the stack if this is a varargs function.
580  if (isVarArg) {
581  static const MCPhysReg ArgRegs[] = {
582  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
583  };
584  unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
585  const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
586  unsigned ArgOffset = CCInfo.getNextStackOffset();
587  if (NumAllocated == 6)
588  ArgOffset += StackOffset;
589  else {
590  assert(!ArgOffset);
591  ArgOffset = 68+4*NumAllocated;
592  }
593 
594  // Remember the vararg offset for the va_start implementation.
595  FuncInfo->setVarArgsFrameOffset(ArgOffset);
596 
597  std::vector<SDValue> OutChains;
598 
599  for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
600  Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
601  MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
602  SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
603 
604  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
605  true);
606  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
607 
608  OutChains.push_back(
609  DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
610  ArgOffset += 4;
611  }
612 
613  if (!OutChains.empty()) {
614  OutChains.push_back(Chain);
615  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
616  }
617  }
618 
619  return Chain;
620 }
621 
622 // Lower formal arguments for the 64 bit ABI.
624  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
626  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
628 
629  // Analyze arguments according to CC_Sparc64.
631  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
632  *DAG.getContext());
633  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
634 
635  // The argument array begins at %fp+BIAS+128, after the register save area.
636  const unsigned ArgArea = 128;
637 
638  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
639  CCValAssign &VA = ArgLocs[i];
640  if (VA.isRegLoc()) {
641  // This argument is passed in a register.
642  // All integer register arguments are promoted by the caller to i64.
643 
644  // Create a virtual register for the promoted live-in value.
645  Register VReg = MF.addLiveIn(VA.getLocReg(),
646  getRegClassFor(VA.getLocVT()));
647  SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
648 
649  // Get the high bits for i32 struct elements.
650  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
651  Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
652  DAG.getConstant(32, DL, MVT::i32));
653 
654  // The caller promoted the argument, so insert an Assert?ext SDNode so we
655  // won't promote the value again in this function.
656  switch (VA.getLocInfo()) {
657  case CCValAssign::SExt:
658  Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
659  DAG.getValueType(VA.getValVT()));
660  break;
661  case CCValAssign::ZExt:
662  Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
663  DAG.getValueType(VA.getValVT()));
664  break;
665  default:
666  break;
667  }
668 
669  // Truncate the register down to the argument type.
670  if (VA.isExtInLoc())
671  Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
672 
673  InVals.push_back(Arg);
674  continue;
675  }
676 
677  // The registers are exhausted. This argument was passed on the stack.
678  assert(VA.isMemLoc());
679  // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
680  // beginning of the arguments area at %fp+BIAS+128.
681  unsigned Offset = VA.getLocMemOffset() + ArgArea;
682  unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
683  // Adjust offset for extended arguments, SPARC is big-endian.
684  // The caller will have written the full slot with extended bytes, but we
685  // prefer our own extending loads.
686  if (VA.isExtInLoc())
687  Offset += 8 - ValSize;
688  int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
689  InVals.push_back(
690  DAG.getLoad(VA.getValVT(), DL, Chain,
693  }
694 
695  if (!IsVarArg)
696  return Chain;
697 
698  // This function takes variable arguments, some of which may have been passed
699  // in registers %i0-%i5. Variable floating point arguments are never passed
700  // in floating point registers. They go on %i0-%i5 or on the stack like
701  // integer arguments.
702  //
703  // The va_start intrinsic needs to know the offset to the first variable
704  // argument.
705  unsigned ArgOffset = CCInfo.getNextStackOffset();
707  // Skip the 128 bytes of register save area.
708  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
709  Subtarget->getStackPointerBias());
710 
711  // Save the variable arguments that were passed in registers.
712  // The caller is required to reserve stack space for 6 arguments regardless
713  // of how many arguments were actually passed.
714  SmallVector<SDValue, 8> OutChains;
715  for (; ArgOffset < 6*8; ArgOffset += 8) {
716  Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
717  SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
718  int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
719  auto PtrVT = getPointerTy(MF.getDataLayout());
720  OutChains.push_back(
721  DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
723  }
724 
725  if (!OutChains.empty())
726  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
727 
728  return Chain;
729 }
730 
731 SDValue
733  SmallVectorImpl<SDValue> &InVals) const {
734  if (Subtarget->is64Bit())
735  return LowerCall_64(CLI, InVals);
736  return LowerCall_32(CLI, InVals);
737 }
738 
739 static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
740  const CallBase *Call) {
741  if (Call)
742  return Call->hasFnAttr(Attribute::ReturnsTwice);
743 
744  const Function *CalleeFn = nullptr;
745  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
746  CalleeFn = dyn_cast<Function>(G->getGlobal());
747  } else if (ExternalSymbolSDNode *E =
748  dyn_cast<ExternalSymbolSDNode>(Callee)) {
749  const Function &Fn = DAG.getMachineFunction().getFunction();
750  const Module *M = Fn.getParent();
751  const char *CalleeName = E->getSymbol();
752  CalleeFn = M->getFunction(CalleeName);
753  }
754 
755  if (!CalleeFn)
756  return false;
757  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
758 }
759 
760 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
761 /// for tail call optimization.
763  CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {
764 
765  auto &Outs = CLI.Outs;
766  auto &Caller = MF.getFunction();
767 
768  // Do not tail call opt functions with "disable-tail-calls" attribute.
769  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
770  return false;
771 
772  // Do not tail call opt if the stack is used to pass parameters.
773  // 64-bit targets have a slightly higher limit since the ABI requires
774  // to allocate some space even when all the parameters fit inside registers.
775  unsigned StackOffsetLimit = Subtarget->is64Bit() ? 48 : 0;
776  if (CCInfo.getNextStackOffset() > StackOffsetLimit)
777  return false;
778 
779  // Do not tail call opt if either the callee or caller returns
780  // a struct and the other does not.
781  if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
782  return false;
783 
784  // Byval parameters hand the function a pointer directly into the stack area
785  // we want to reuse during a tail call.
786  for (auto &Arg : Outs)
787  if (Arg.Flags.isByVal())
788  return false;
789 
790  return true;
791 }
792 
793 // Lower a call for the 32-bit ABI.
794 SDValue
796  SmallVectorImpl<SDValue> &InVals) const {
797  SelectionDAG &DAG = CLI.DAG;
798  SDLoc &dl = CLI.DL;
800  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
802  SDValue Chain = CLI.Chain;
803  SDValue Callee = CLI.Callee;
804  bool &isTailCall = CLI.IsTailCall;
805  CallingConv::ID CallConv = CLI.CallConv;
806  bool isVarArg = CLI.IsVarArg;
807 
808  // Analyze operands of the call, assigning locations to each operand.
810  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
811  *DAG.getContext());
812  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
813 
814  isTailCall = isTailCall && IsEligibleForTailCallOptimization(
815  CCInfo, CLI, DAG.getMachineFunction());
816 
817  // Get the size of the outgoing arguments stack space requirement.
818  unsigned ArgsSize = CCInfo.getNextStackOffset();
819 
820  // Keep stack frames 8-byte aligned.
821  ArgsSize = (ArgsSize+7) & ~7;
822 
824 
825  // Create local copies for byval args.
826  SmallVector<SDValue, 8> ByValArgs;
827  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
828  ISD::ArgFlagsTy Flags = Outs[i].Flags;
829  if (!Flags.isByVal())
830  continue;
831 
832  SDValue Arg = OutVals[i];
833  unsigned Size = Flags.getByValSize();
834  Align Alignment = Flags.getNonZeroByValAlign();
835 
836  if (Size > 0U) {
837  int FI = MFI.CreateStackObject(Size, Alignment, false);
838  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
839  SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
840 
841  Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
842  false, // isVolatile,
843  (Size <= 32), // AlwaysInline if size <= 32,
844  false, // isTailCall
846  ByValArgs.push_back(FIPtr);
847  }
848  else {
849  SDValue nullVal;
850  ByValArgs.push_back(nullVal);
851  }
852  }
853 
854  assert(!isTailCall || ArgsSize == 0);
855 
856  if (!isTailCall)
857  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
858 
860  SmallVector<SDValue, 8> MemOpChains;
861 
862  const unsigned StackOffset = 92;
863  bool hasStructRetAttr = false;
864  unsigned SRetArgSize = 0;
865  // Walk the register/memloc assignments, inserting copies/loads.
866  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
867  i != e;
868  ++i, ++realArgIdx) {
869  CCValAssign &VA = ArgLocs[i];
870  SDValue Arg = OutVals[realArgIdx];
871 
872  ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
873 
874  // Use local copy if it is a byval arg.
875  if (Flags.isByVal()) {
876  Arg = ByValArgs[byvalArgIdx++];
877  if (!Arg) {
878  continue;
879  }
880  }
881 
882  // Promote the value if needed.
883  switch (VA.getLocInfo()) {
884  default: llvm_unreachable("Unknown loc info!");
885  case CCValAssign::Full: break;
886  case CCValAssign::SExt:
887  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
888  break;
889  case CCValAssign::ZExt:
890  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
891  break;
892  case CCValAssign::AExt:
893  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
894  break;
895  case CCValAssign::BCvt:
896  Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
897  break;
898  }
899 
900  if (Flags.isSRet()) {
901  assert(VA.needsCustom());
902 
903  if (isTailCall)
904  continue;
905 
906  // store SRet argument in %sp+64
907  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
908  SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
909  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
910  MemOpChains.push_back(
911  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
912  hasStructRetAttr = true;
913  // sret only allowed on first argument
914  assert(Outs[realArgIdx].OrigArgIndex == 0);
915  SRetArgSize =
916  DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
917  continue;
918  }
919 
920  if (VA.needsCustom()) {
921  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
922 
923  if (VA.isMemLoc()) {
924  unsigned Offset = VA.getLocMemOffset() + StackOffset;
925  // if it is double-word aligned, just store.
926  if (Offset % 8 == 0) {
927  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
928  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
929  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
930  MemOpChains.push_back(
931  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
932  continue;
933  }
934  }
935 
936  if (VA.getLocVT() == MVT::f64) {
937  // Move from the float value from float registers into the
938  // integer registers.
939  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
940  Arg = bitcastConstantFPToInt(C, dl, DAG);
941  else
942  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
943  }
944 
946  Arg,
947  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
949  Arg,
950  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
951 
952  if (VA.isRegLoc()) {
953  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
954  assert(i+1 != e);
955  CCValAssign &NextVA = ArgLocs[++i];
956  if (NextVA.isRegLoc()) {
957  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
958  } else {
959  // Store the second part in stack.
960  unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
961  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
962  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
963  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
964  MemOpChains.push_back(
965  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
966  }
967  } else {
968  unsigned Offset = VA.getLocMemOffset() + StackOffset;
969  // Store the first part.
970  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
971  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
972  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
973  MemOpChains.push_back(
974  DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
975  // Store the second part.
976  PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
977  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
978  MemOpChains.push_back(
979  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
980  }
981  continue;
982  }
983 
984  // Arguments that can be passed on register must be kept at
985  // RegsToPass vector
986  if (VA.isRegLoc()) {
987  if (VA.getLocVT() != MVT::f32) {
988  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
989  continue;
990  }
991  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
992  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
993  continue;
994  }
995 
996  assert(VA.isMemLoc());
997 
998  // Create a store off the stack pointer for this argument.
999  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1001  dl);
1002  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1003  MemOpChains.push_back(
1004  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
1005  }
1006 
1007 
1008  // Emit all stores, make sure the occur before any copies into physregs.
1009  if (!MemOpChains.empty())
1010  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1011 
1012  // Build a sequence of copy-to-reg nodes chained together with token
1013  // chain and flag operands which copy the outgoing args into registers.
1014  // The InFlag in necessary since all emitted instructions must be
1015  // stuck together.
1016  SDValue InFlag;
1017  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1018  Register Reg = RegsToPass[i].first;
1019  if (!isTailCall)
1020  Reg = toCallerWindow(Reg);
1021  Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
1022  InFlag = Chain.getValue(1);
1023  }
1024 
1025  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1026 
1027  // If the callee is a GlobalAddress node (quite common, every direct call is)
1028  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1029  // Likewise ExternalSymbol -> TargetExternalSymbol.
1032  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1033  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
1034  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1035  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
1036 
1037  // Returns a chain & a flag for retval copy to use
1038  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1040  Ops.push_back(Chain);
1041  Ops.push_back(Callee);
1042  if (hasStructRetAttr)
1043  Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
1044  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1045  Register Reg = RegsToPass[i].first;
1046  if (!isTailCall)
1047  Reg = toCallerWindow(Reg);
1048  Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
1049  }
1050 
1051  // Add a register mask operand representing the call-preserved registers.
1052  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1053  const uint32_t *Mask =
1054  ((hasReturnsTwice)
1055  ? TRI->getRTCallPreservedMask(CallConv)
1056  : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
1057  assert(Mask && "Missing call preserved mask for calling convention");
1058  Ops.push_back(DAG.getRegisterMask(Mask));
1059 
1060  if (InFlag.getNode())
1061  Ops.push_back(InFlag);
1062 
1063  if (isTailCall) {
1065  return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
1066  }
1067 
1068  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
1069  InFlag = Chain.getValue(1);
1070 
1071  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InFlag, dl);
1072  InFlag = Chain.getValue(1);
1073 
1074  // Assign locations to each value returned by this call.
1076  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1077  *DAG.getContext());
1078 
1079  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
1080 
1081  // Copy all of the result registers out of their specified physreg.
1082  for (unsigned i = 0; i != RVLocs.size(); ++i) {
1083  assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
1084  if (RVLocs[i].getLocVT() == MVT::v2i32) {
1085  SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
1086  SDValue Lo = DAG.getCopyFromReg(
1087  Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
1088  Chain = Lo.getValue(1);
1089  InFlag = Lo.getValue(2);
1090  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
1091  DAG.getConstant(0, dl, MVT::i32));
1092  SDValue Hi = DAG.getCopyFromReg(
1093  Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
1094  Chain = Hi.getValue(1);
1095  InFlag = Hi.getValue(2);
1096  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1097  DAG.getConstant(1, dl, MVT::i32));
1098  InVals.push_back(Vec);
1099  } else {
1100  Chain =
1101  DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1102  RVLocs[i].getValVT(), InFlag)
1103  .getValue(1);
1104  InFlag = Chain.getValue(2);
1105  InVals.push_back(Chain.getValue(0));
1106  }
1107  }
1108 
1109  return Chain;
1110 }
1111 
1112 // FIXME? Maybe this could be a TableGen attribute on some registers and
1113 // this table could be generated automatically from RegInfo.
1115  const MachineFunction &MF) const {
1117  .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1118  .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1119  .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1120  .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1121  .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1122  .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1123  .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1124  .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1125  .Default(0);
1126 
1127  if (Reg)
1128  return Reg;
1129 
1130  report_fatal_error("Invalid register name global variable");
1131 }
1132 
1133 // Fixup floating point arguments in the ... part of a varargs call.
1134 //
1135 // The SPARC v9 ABI requires that floating point arguments are treated the same
1136 // as integers when calling a varargs function. This does not apply to the
1137 // fixed arguments that are part of the function's prototype.
1138 //
1139 // This function post-processes a CCValAssign array created by
1140 // AnalyzeCallOperands().
1142  ArrayRef<ISD::OutputArg> Outs) {
1143  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1144  const CCValAssign &VA = ArgLocs[i];
1145  MVT ValTy = VA.getLocVT();
1146  // FIXME: What about f32 arguments? C promotes them to f64 when calling
1147  // varargs functions.
1148  if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1149  continue;
1150  // The fixed arguments to a varargs function still go in FP registers.
1151  if (Outs[VA.getValNo()].IsFixed)
1152  continue;
1153 
1154  // This floating point argument should be reassigned.
1155  CCValAssign NewVA;
1156 
1157  // Determine the offset into the argument array.
1158  Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1159  unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1160  unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1161  assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1162 
1163  if (Offset < 6*8) {
1164  // This argument should go in %i0-%i5.
1165  unsigned IReg = SP::I0 + Offset/8;
1166  if (ValTy == MVT::f64)
1167  // Full register, just bitconvert into i64.
1168  NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
1169  IReg, MVT::i64, CCValAssign::BCvt);
1170  else {
1171  assert(ValTy == MVT::f128 && "Unexpected type!");
1172  // Full register, just bitconvert into i128 -- We will lower this into
1173  // two i64s in LowerCall_64.
1174  NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
1175  IReg, MVT::i128, CCValAssign::BCvt);
1176  }
1177  } else {
1178  // This needs to go to memory, we're out of integer registers.
1179  NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
1180  Offset, VA.getLocVT(), VA.getLocInfo());
1181  }
1182  ArgLocs[i] = NewVA;
1183  }
1184 }
1185 
1186 // Lower a call for the 64-bit ABI.
1187 SDValue
1189  SmallVectorImpl<SDValue> &InVals) const {
1190  SelectionDAG &DAG = CLI.DAG;
1191  SDLoc DL = CLI.DL;
1192  SDValue Chain = CLI.Chain;
1193  auto PtrVT = getPointerTy(DAG.getDataLayout());
1194 
1195  // Analyze operands of the call, assigning locations to each operand.
1197  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1198  *DAG.getContext());
1199  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1200 
1202  CCInfo, CLI, DAG.getMachineFunction());
1203 
1204  // Get the size of the outgoing arguments stack space requirement.
1205  // The stack offset computed by CC_Sparc64 includes all arguments.
1206  // Called functions expect 6 argument words to exist in the stack frame, used
1207  // or not.
1208  unsigned StackReserved = 6 * 8u;
1209  unsigned ArgsSize = std::max(StackReserved, CCInfo.getNextStackOffset());
1210 
1211  // Keep stack frames 16-byte aligned.
1212  ArgsSize = alignTo(ArgsSize, 16);
1213 
1214  // Varargs calls require special treatment.
1215  if (CLI.IsVarArg)
1216  fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1217 
1218  assert(!CLI.IsTailCall || ArgsSize == StackReserved);
1219 
1220  // Adjust the stack pointer to make room for the arguments.
1221  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1222  // with more than 6 arguments.
1223  if (!CLI.IsTailCall)
1224  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1225 
1226  // Collect the set of registers to pass to the function and their values.
1227  // This will be emitted as a sequence of CopyToReg nodes glued to the call
1228  // instruction.
1230 
1231  // Collect chains from all the memory opeations that copy arguments to the
1232  // stack. They must follow the stack pointer adjustment above and precede the
1233  // call instruction itself.
1234  SmallVector<SDValue, 8> MemOpChains;
1235 
1236  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1237  const CCValAssign &VA = ArgLocs[i];
1238  SDValue Arg = CLI.OutVals[i];
1239 
1240  // Promote the value if needed.
1241  switch (VA.getLocInfo()) {
1242  default:
1243  llvm_unreachable("Unknown location info!");
1244  case CCValAssign::Full:
1245  break;
1246  case CCValAssign::SExt:
1247  Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1248  break;
1249  case CCValAssign::ZExt:
1250  Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1251  break;
1252  case CCValAssign::AExt:
1253  Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1254  break;
1255  case CCValAssign::BCvt:
1256  // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1257  // SPARC does not support i128 natively. Lower it into two i64, see below.
1258  if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1259  || VA.getLocVT() != MVT::i128)
1260  Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1261  break;
1262  }
1263 
1264  if (VA.isRegLoc()) {
1265  if (VA.needsCustom() && VA.getValVT() == MVT::f128
1266  && VA.getLocVT() == MVT::i128) {
1267  // Store and reload into the integer register reg and reg+1.
1268  unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1269  unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1270  SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1271  SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1272  HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1273  SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1274  LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1275 
1276  // Store to %sp+BIAS+128+Offset
1277  SDValue Store =
1278  DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1279  // Load into Reg and Reg+1
1280  SDValue Hi64 =
1281  DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1282  SDValue Lo64 =
1283  DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1284 
1285  Register HiReg = VA.getLocReg();
1286  Register LoReg = VA.getLocReg() + 1;
1287  if (!CLI.IsTailCall) {
1288  HiReg = toCallerWindow(HiReg);
1289  LoReg = toCallerWindow(LoReg);
1290  }
1291 
1292  RegsToPass.push_back(std::make_pair(HiReg, Hi64));
1293  RegsToPass.push_back(std::make_pair(LoReg, Lo64));
1294  continue;
1295  }
1296 
1297  // The custom bit on an i32 return value indicates that it should be
1298  // passed in the high bits of the register.
1299  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1300  Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1301  DAG.getConstant(32, DL, MVT::i32));
1302 
1303  // The next value may go in the low bits of the same register.
1304  // Handle both at once.
1305  if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1306  ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1308  CLI.OutVals[i+1]);
1309  Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1310  // Skip the next value, it's already done.
1311  ++i;
1312  }
1313  }
1314 
1315  Register Reg = VA.getLocReg();
1316  if (!CLI.IsTailCall)
1317  Reg = toCallerWindow(Reg);
1318  RegsToPass.push_back(std::make_pair(Reg, Arg));
1319  continue;
1320  }
1321 
1322  assert(VA.isMemLoc());
1323 
1324  // Create a store off the stack pointer for this argument.
1325  SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1326  // The argument area starts at %fp+BIAS+128 in the callee frame,
1327  // %sp+BIAS+128 in ours.
1328  SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1329  Subtarget->getStackPointerBias() +
1330  128, DL);
1331  PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1332  MemOpChains.push_back(
1333  DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1334  }
1335 
1336  // Emit all stores, make sure they occur before the call.
1337  if (!MemOpChains.empty())
1338  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1339 
1340  // Build a sequence of CopyToReg nodes glued together with token chain and
1341  // glue operands which copy the outgoing args into registers. The InGlue is
1342  // necessary since all emitted instructions must be stuck together in order
1343  // to pass the live physical registers.
1344  SDValue InGlue;
1345  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1346  Chain = DAG.getCopyToReg(Chain, DL,
1347  RegsToPass[i].first, RegsToPass[i].second, InGlue);
1348  InGlue = Chain.getValue(1);
1349  }
1350 
1351  // If the callee is a GlobalAddress node (quite common, every direct call is)
1352  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1353  // Likewise ExternalSymbol -> TargetExternalSymbol.
1354  SDValue Callee = CLI.Callee;
1355  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1358  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1359  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
1360  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1361  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
1362 
1363  // Build the operands for the call instruction itself.
1365  Ops.push_back(Chain);
1366  Ops.push_back(Callee);
1367  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1368  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1369  RegsToPass[i].second.getValueType()));
1370 
1371  // Add a register mask operand representing the call-preserved registers.
1372  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1373  const uint32_t *Mask =
1374  ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1376  CLI.CallConv));
1377  assert(Mask && "Missing call preserved mask for calling convention");
1378  Ops.push_back(DAG.getRegisterMask(Mask));
1379 
1380  // Make sure the CopyToReg nodes are glued to the call instruction which
1381  // consumes the registers.
1382  if (InGlue.getNode())
1383  Ops.push_back(InGlue);
1384 
1385  // Now the call itself.
1386  if (CLI.IsTailCall) {
1388  return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
1389  }
1390  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1391  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1392  InGlue = Chain.getValue(1);
1393 
1394  // Revert the stack pointer immediately after the call.
1395  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
1396  InGlue = Chain.getValue(1);
1397 
1398  // Now extract the return values. This is more or less the same as
1399  // LowerFormalArguments_64.
1400 
1401  // Assign locations to each value returned by this call.
1403  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1404  *DAG.getContext());
1405 
1406  // Set inreg flag manually for codegen generated library calls that
1407  // return float.
1408  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1409  CLI.Ins[0].Flags.setInReg();
1410 
1411  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1412 
1413  // Copy all of the result registers out of their specified physreg.
1414  for (unsigned i = 0; i != RVLocs.size(); ++i) {
1415  CCValAssign &VA = RVLocs[i];
1416  assert(VA.isRegLoc() && "Can only return in registers!");
1417  unsigned Reg = toCallerWindow(VA.getLocReg());
1418 
1419  // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1420  // reside in the same register in the high and low bits. Reuse the
1421  // CopyFromReg previous node to avoid duplicate copies.
1422  SDValue RV;
1423  if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1424  if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1425  RV = Chain.getValue(0);
1426 
1427  // But usually we'll create a new CopyFromReg for a different register.
1428  if (!RV.getNode()) {
1429  RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1430  Chain = RV.getValue(1);
1431  InGlue = Chain.getValue(2);
1432  }
1433 
1434  // Get the high bits for i32 struct elements.
1435  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1436  RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1437  DAG.getConstant(32, DL, MVT::i32));
1438 
1439  // The callee promoted the return value, so insert an Assert?ext SDNode so
1440  // we won't promote the value again in this function.
1441  switch (VA.getLocInfo()) {
1442  case CCValAssign::SExt:
1443  RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1444  DAG.getValueType(VA.getValVT()));
1445  break;
1446  case CCValAssign::ZExt:
1447  RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1448  DAG.getValueType(VA.getValVT()));
1449  break;
1450  default:
1451  break;
1452  }
1453 
1454  // Truncate the register down to the return value type.
1455  if (VA.isExtInLoc())
1456  RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1457 
1458  InVals.push_back(RV);
1459  }
1460 
1461  return Chain;
1462 }
1463 
1464 //===----------------------------------------------------------------------===//
1465 // TargetLowering Implementation
1466 //===----------------------------------------------------------------------===//
1467 
1469  if (AI->getOperation() == AtomicRMWInst::Xchg &&
1470  AI->getType()->getPrimitiveSizeInBits() == 32)
1471  return AtomicExpansionKind::None; // Uses xchg instruction
1472 
1474 }
1475 
1476 /// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1477 /// condition.
1479  switch (CC) {
1480  default: llvm_unreachable("Unknown integer condition code!");
1481  case ISD::SETEQ: return SPCC::ICC_E;
1482  case ISD::SETNE: return SPCC::ICC_NE;
1483  case ISD::SETLT: return SPCC::ICC_L;
1484  case ISD::SETGT: return SPCC::ICC_G;
1485  case ISD::SETLE: return SPCC::ICC_LE;
1486  case ISD::SETGE: return SPCC::ICC_GE;
1487  case ISD::SETULT: return SPCC::ICC_CS;
1488  case ISD::SETULE: return SPCC::ICC_LEU;
1489  case ISD::SETUGT: return SPCC::ICC_GU;
1490  case ISD::SETUGE: return SPCC::ICC_CC;
1491  }
1492 }
1493 
1494 /// FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC
1495 /// FCC condition.
1497  switch (CC) {
1498  default: llvm_unreachable("Unknown fp condition code!");
1499  case ISD::SETEQ:
1500  case ISD::SETOEQ: return SPCC::FCC_E;
1501  case ISD::SETNE:
1502  case ISD::SETUNE: return SPCC::FCC_NE;
1503  case ISD::SETLT:
1504  case ISD::SETOLT: return SPCC::FCC_L;
1505  case ISD::SETGT:
1506  case ISD::SETOGT: return SPCC::FCC_G;
1507  case ISD::SETLE:
1508  case ISD::SETOLE: return SPCC::FCC_LE;
1509  case ISD::SETGE:
1510  case ISD::SETOGE: return SPCC::FCC_GE;
1511  case ISD::SETULT: return SPCC::FCC_UL;
1512  case ISD::SETULE: return SPCC::FCC_ULE;
1513  case ISD::SETUGT: return SPCC::FCC_UG;
1514  case ISD::SETUGE: return SPCC::FCC_UGE;
1515  case ISD::SETUO: return SPCC::FCC_U;
1516  case ISD::SETO: return SPCC::FCC_O;
1517  case ISD::SETONE: return SPCC::FCC_LG;
1518  case ISD::SETUEQ: return SPCC::FCC_UE;
1519  }
1520 }
1521 
// SparcTargetLowering constructor (continuation): configures boolean
// contents, register classes, per-operation legalization actions, atomics
// support, and runtime-library (libcall) names for the selected subtarget.
// NOTE(review): the doxygen extraction this chunk came from dropped every
// source line that carried a hyperlink (including the constructor header at
// 1522 and most setOperationAction/setLoadExtAction/setTruncStoreAction
// calls), so many statements below are missing as rendered; the surviving
// text is kept byte-identical.
1523  const SparcSubtarget &STI)
1524  : TargetLowering(TM), Subtarget(&STI) {
1525  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
1526 
1527  // Instructions which use registers as conditionals examine all the
1528  // bits (as does the pseudo SELECT_CC expansion). I don't think it
1529  // matters much whether it's ZeroOrOneBooleanContent, or
1530  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1531  // former.
1534 
1535  // Set up the register classes.
1536  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1537  if (!Subtarget->useSoftFloat()) {
1538  addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1539  addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1540  addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1541  }
1542  if (Subtarget->is64Bit()) {
1543  addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1544  } else {
1545  // On 32bit sparc, we define a double-register 32bit register
1546  // class, as well. This is modeled in LLVM as a 2-vector of i32.
1547  addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1548 
1549  // ...but almost all operations must be expanded, so set that as
1550  // the default.
1551  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1553  }
1554  // Truncating/extending stores/loads are also not supported.
1559 
1563 
1566  }
1567  // However, load and store *are* legal.
1572 
1573  // And we need to promote i64 loads/stores into vector load/store
1576 
1577  // Sadly, this doesn't work:
1578  // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1579  // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1580  }
1581 
1582  // Turn FP extload into load/fpextend
1583  for (MVT VT : MVT::fp_valuetypes()) {
1587  }
1588 
1589  // Sparc doesn't have i1 sign extending load
1590  for (MVT VT : MVT::integer_valuetypes())
1592 
1593  // Turn FP truncstore into trunc + store.
1600 
1601  // Custom legalize GlobalAddress nodes into LO/HI parts.
1606 
1607  // Sparc doesn't have sext_inreg, replace them with shl/sra
1611 
1612  // Sparc has no REM or DIVREM operations.
1617 
1618  // ... nor does SparcV9.
1619  if (Subtarget->is64Bit()) {
1624  }
1625 
1626  // Custom expand fp<->sint
1631 
1632  // Custom Expand fp<->uint
1637 
1638  // Lower f16 conversion operations into library calls
1645 
1648 
1649  // Sparc has no select or setcc: expand to SELECT_CC.
1654 
1659 
1660  // Sparc doesn't have BRCOND either, it has BR_CC.
1668 
1673 
1678 
1679  if (Subtarget->is64Bit()) {
1690 
1692  Subtarget->usePopc() ? Legal : Expand);
1699  }
1700 
1701  // ATOMICs.
1702  // Atomics are supported on SparcV9. 32-bit atomics are also
1703  // supported by some Leon SparcV8 variants. Otherwise, atomics
1704  // are unsupported.
1705  if (Subtarget->isV9())
1707  else if (Subtarget->hasLeonCasa())
1709  else
1711 
1713 
1715 
1717 
1718  // Custom Lower Atomic LOAD/STORE
1721 
1722  if (Subtarget->is64Bit()) {
1727  }
1728 
1729  if (!Subtarget->is64Bit()) {
1730  // These libcalls are not available in 32-bit.
1731  setLibcallName(RTLIB::MULO_I64, nullptr);
1732  setLibcallName(RTLIB::SHL_I128, nullptr);
1733  setLibcallName(RTLIB::SRL_I128, nullptr);
1734  setLibcallName(RTLIB::SRA_I128, nullptr);
1735  }
1736 
1737  setLibcallName(RTLIB::MULO_I128, nullptr);
1738 
1739  if (!Subtarget->isV9()) {
1740  // SparcV8 does not have FNEGD and FABSD.
1743  }
1744 
1771 
1775 
1776  // Expands to [SU]MUL_LOHI.
1780 
1781  if (Subtarget->useSoftMulDiv()) {
1782  // .umul works for both signed and unsigned
1785  setLibcallName(RTLIB::MUL_I32, ".umul");
1786 
1788  setLibcallName(RTLIB::SDIV_I32, ".div");
1789 
1791  setLibcallName(RTLIB::UDIV_I32, ".udiv");
1792 
1793  setLibcallName(RTLIB::SREM_I32, ".rem");
1794  setLibcallName(RTLIB::UREM_I32, ".urem");
1795  }
1796 
1797  if (Subtarget->is64Bit()) {
1802 
1805 
1809  }
1810 
1811  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1813  // VAARG needs to be lowered to not do unaligned accesses for doubles.
1815 
1818 
1819  // Use the default implementation.
1825 
1827 
1829  Subtarget->usePopc() ? Legal : Expand);
1830 
1831  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1834  } else {
1837  }
1838 
1839  if (Subtarget->hasHardQuad()) {
1847  if (Subtarget->isV9()) {
1850  } else {
1853  }
1854 
1855  if (!Subtarget->is64Bit()) {
1856  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1857  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1858  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1859  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1860  }
1861 
1862  } else {
1863  // Custom legalize f128 operations.
1864 
1872 
1876 
1877  // Setup Runtime library names.
1878  if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1879  setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1880  setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1881  setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1882  setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1883  setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1884  setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1885  setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1886  setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1887  setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1888  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1889  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1890  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1891  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1892  setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1893  setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1894  setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1895  setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1896  } else if (!Subtarget->useSoftFloat()) {
1897  setLibcallName(RTLIB::ADD_F128, "_Q_add");
1898  setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1899  setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1900  setLibcallName(RTLIB::DIV_F128, "_Q_div");
1901  setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1902  setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1903  setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1904  setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1905  setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1906  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1907  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1908  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1909  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1910  setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1911  setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1912  setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1913  setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1914  }
1915  }
1916 
1917  if (Subtarget->fixAllFDIVSQRT()) {
1918  // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1919  // the former instructions generate errata on LEON processors.
1922  }
1923 
1924  if (Subtarget->hasNoFMULS()) {
1926  }
1927 
1928  // Custom combine bitcast between f64 and v2i32
1929  if (!Subtarget->is64Bit())
1931 
1932  if (Subtarget->hasLeonCycleCounter())
1934 
1936 
1938 
1940 }
1941 
1943  return Subtarget->useSoftFloat();
1944 }
1945 
1946 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1947  switch ((SPISD::NodeType)Opcode) {
1948  case SPISD::FIRST_NUMBER: break;
1949  case SPISD::CMPICC: return "SPISD::CMPICC";
1950  case SPISD::CMPFCC: return "SPISD::CMPFCC";
1951  case SPISD::CMPFCC_V9:
1952  return "SPISD::CMPFCC_V9";
1953  case SPISD::BRICC: return "SPISD::BRICC";
1954  case SPISD::BPICC:
1955  return "SPISD::BPICC";
1956  case SPISD::BPXCC:
1957  return "SPISD::BPXCC";
1958  case SPISD::BRFCC: return "SPISD::BRFCC";
1959  case SPISD::BRFCC_V9:
1960  return "SPISD::BRFCC_V9";
1961  case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1962  case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1963  case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1964  case SPISD::Hi: return "SPISD::Hi";
1965  case SPISD::Lo: return "SPISD::Lo";
1966  case SPISD::FTOI: return "SPISD::FTOI";
1967  case SPISD::ITOF: return "SPISD::ITOF";
1968  case SPISD::FTOX: return "SPISD::FTOX";
1969  case SPISD::XTOF: return "SPISD::XTOF";
1970  case SPISD::CALL: return "SPISD::CALL";
1971  case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1972  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1973  case SPISD::FLUSHW: return "SPISD::FLUSHW";
1974  case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1975  case SPISD::TLS_LD: return "SPISD::TLS_LD";
1976  case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
1977  case SPISD::TAIL_CALL: return "SPISD::TAIL_CALL";
1978  case SPISD::LOAD_GDOP: return "SPISD::LOAD_GDOP";
1979  }
1980  return nullptr;
1981 }
1982 
1984  EVT VT) const {
1985  if (!VT.isVector())
1986  return MVT::i32;
1988 }
1989 
1990 /// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
1991 /// be zero. Op is expected to be a target specific node. Used by DAG
1992 /// combiner.
1994  (const SDValue Op,
1995  KnownBits &Known,
1996  const APInt &DemandedElts,
1997  const SelectionDAG &DAG,
1998  unsigned Depth) const {
1999  KnownBits Known2;
2000  Known.resetAll();
2001 
2002  switch (Op.getOpcode()) {
2003  default: break;
2004  case SPISD::SELECT_ICC:
2005  case SPISD::SELECT_XCC:
2006  case SPISD::SELECT_FCC:
2007  Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
2008  Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
2009 
2010  // Only known if known in both the LHS and RHS.
2011  Known = KnownBits::commonBits(Known, Known2);
2012  break;
2013  }
2014 }
2015 
2016 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
2017 // set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
2019  ISD::CondCode CC, unsigned &SPCC) {
2020  if (isNullConstant(RHS) && CC == ISD::SETNE &&
2021  (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2022  LHS.getOpcode() == SPISD::SELECT_XCC) &&
2023  LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2024  (LHS.getOpcode() == SPISD::SELECT_FCC &&
2025  (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2026  LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2027  isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
2028  SDValue CMPCC = LHS.getOperand(3);
2029  SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
2030  LHS = CMPCC.getOperand(0);
2031  RHS = CMPCC.getOperand(1);
2032  }
2033 }
2034 
2035 // Convert to a target node and set target flags.
2037  SelectionDAG &DAG) const {
2038  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
2039  return DAG.getTargetGlobalAddress(GA->getGlobal(),
2040  SDLoc(GA),
2041  GA->getValueType(0),
2042  GA->getOffset(), TF);
2043 
2044  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
2045  return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2046  CP->getAlign(), CP->getOffset(), TF);
2047 
2048  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
2049  return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2050  Op.getValueType(),
2051  0,
2052  TF);
2053 
2054  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
2055  return DAG.getTargetExternalSymbol(ES->getSymbol(),
2056  ES->getValueType(0), TF);
2057 
2058  llvm_unreachable("Unhandled address SDNode");
2059 }
2060 
2061 // Split Op into high and low parts according to HiTF and LoTF.
2062 // Return an ADD node combining the parts.
2064  unsigned HiTF, unsigned LoTF,
2065  SelectionDAG &DAG) const {
2066  SDLoc DL(Op);
2067  EVT VT = Op.getValueType();
2068  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2069  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2070  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2071 }
2072 
2073 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2074 // or ExternalSymbol SDNode.
2076  SDLoc DL(Op);
2077  EVT VT = getPointerTy(DAG.getDataLayout());
2078 
2079  // Handle PIC mode first. SPARC needs a got load for every variable!
2080  if (isPositionIndependent()) {
2081  const Module *M = DAG.getMachineFunction().getFunction().getParent();
2082  PICLevel::Level picLevel = M->getPICLevel();
2083  SDValue Idx;
2084 
2085  if (picLevel == PICLevel::SmallPIC) {
2086  // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2087  Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2089  } else {
2090  // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2093  }
2094 
2095  SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2096  SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2097  // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2098  // function has calls.
2100  MFI.setHasCalls(true);
2101  return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2103  }
2104 
2105  // This is one of the absolute code models.
2106  switch(getTargetMachine().getCodeModel()) {
2107  default:
2108  llvm_unreachable("Unsupported absolute code model");
2109  case CodeModel::Small:
2110  // abs32.
2113  case CodeModel::Medium: {
2114  // abs44.
2117  H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2119  L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2120  return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2121  }
2122  case CodeModel::Large: {
2123  // abs64.
2126  Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2129  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2130  }
2131  }
2132 }
2133 
2135  SelectionDAG &DAG) const {
2136  return makeAddress(Op, DAG);
2137 }
2138 
2140  SelectionDAG &DAG) const {
2141  return makeAddress(Op, DAG);
2142 }
2143 
2145  SelectionDAG &DAG) const {
2146  return makeAddress(Op, DAG);
2147 }
2148 
2150  SelectionDAG &DAG) const {
2151 
2152  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2153  if (DAG.getTarget().useEmulatedTLS())
2154  return LowerToTLSEmulatedModel(GA, DAG);
2155 
2156  SDLoc DL(GA);
2157  const GlobalValue *GV = GA->getGlobal();
2158  EVT PtrVT = getPointerTy(DAG.getDataLayout());
2159 
2161 
2163  unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2166  unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2169  unsigned addTF = ((model == TLSModel::GeneralDynamic)
2172  unsigned callTF = ((model == TLSModel::GeneralDynamic)
2175 
2176  SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2177  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2178  SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2179  withTargetFlags(Op, addTF, DAG));
2180 
2181  SDValue Chain = DAG.getEntryNode();
2182  SDValue InFlag;
2183 
2184  Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
2185  Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
2186  InFlag = Chain.getValue(1);
2187  SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2188  SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2189 
2190  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2191  const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2193  assert(Mask && "Missing call preserved mask for calling convention");
2194  SDValue Ops[] = {Chain,
2195  Callee,
2196  Symbol,
2197  DAG.getRegister(SP::O0, PtrVT),
2198  DAG.getRegisterMask(Mask),
2199  InFlag};
2200  Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2201  InFlag = Chain.getValue(1);
2202  Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InFlag, DL);
2203  InFlag = Chain.getValue(1);
2204  SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
2205 
2207  return Ret;
2208 
2209  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2211  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2213  HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2214  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2216  }
2217 
2218  if (model == TLSModel::InitialExec) {
2219  unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2221 
2222  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2223 
2224  // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2225  // function has calls.
2227  MFI.setHasCalls(true);
2228 
2229  SDValue TGA = makeHiLoPair(Op,
2232  SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2233  SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2234  DL, PtrVT, Ptr,
2235  withTargetFlags(Op, ldTF, DAG));
2236  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2237  DAG.getRegister(SP::G7, PtrVT), Offset,
2240  }
2241 
2243  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2245  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2247  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2248 
2249  return DAG.getNode(ISD::ADD, DL, PtrVT,
2250  DAG.getRegister(SP::G7, PtrVT), Offset);
2251 }
2252 
2255  const SDLoc &DL,
2256  SelectionDAG &DAG) const {
2258  EVT ArgVT = Arg.getValueType();
2259  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2260 
2261  ArgListEntry Entry;
2262  Entry.Node = Arg;
2263  Entry.Ty = ArgTy;
2264 
2265  if (ArgTy->isFP128Ty()) {
2266  // Create a stack object and pass the pointer to the library function.
2267  int FI = MFI.CreateStackObject(16, Align(8), false);
2268  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2269  Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2270  Align(8));
2271 
2272  Entry.Node = FIPtr;
2273  Entry.Ty = PointerType::getUnqual(ArgTy);
2274  }
2275  Args.push_back(Entry);
2276  return Chain;
2277 }
2278 
2279 SDValue
2281  const char *LibFuncName,
2282  unsigned numArgs) const {
2283 
2284  ArgListTy Args;
2285 
2287  auto PtrVT = getPointerTy(DAG.getDataLayout());
2288 
2289  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2290  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2291  Type *RetTyABI = RetTy;
2292  SDValue Chain = DAG.getEntryNode();
2293  SDValue RetPtr;
2294 
2295  if (RetTy->isFP128Ty()) {
2296  // Create a Stack Object to receive the return value of type f128.
2297  ArgListEntry Entry;
2298  int RetFI = MFI.CreateStackObject(16, Align(8), false);
2299  RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2300  Entry.Node = RetPtr;
2301  Entry.Ty = PointerType::getUnqual(RetTy);
2302  if (!Subtarget->is64Bit()) {
2303  Entry.IsSRet = true;
2304  Entry.IndirectType = RetTy;
2305  }
2306  Entry.IsReturned = false;
2307  Args.push_back(Entry);
2308  RetTyABI = Type::getVoidTy(*DAG.getContext());
2309  }
2310 
2311  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2312  for (unsigned i = 0, e = numArgs; i != e; ++i) {
2313  Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2314  }
2316  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2317  .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2318 
2319  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2320 
2321  // chain is in second result.
2322  if (RetTyABI == RetTy)
2323  return CallInfo.first;
2324 
2325  assert (RetTy->isFP128Ty() && "Unexpected return type!");
2326 
2327  Chain = CallInfo.second;
2328 
2329  // Load RetPtr to get the return value.
2330  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2331  MachinePointerInfo(), Align(8));
2332 }
2333 
2335  unsigned &SPCC, const SDLoc &DL,
2336  SelectionDAG &DAG) const {
2337 
2338  const char *LibCall = nullptr;
2339  bool is64Bit = Subtarget->is64Bit();
2340  switch(SPCC) {
2341  default: llvm_unreachable("Unhandled conditional code!");
2342  case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2343  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2344  case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2345  case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2346  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2347  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2348  case SPCC::FCC_UL :
2349  case SPCC::FCC_ULE:
2350  case SPCC::FCC_UG :
2351  case SPCC::FCC_UGE:
2352  case SPCC::FCC_U :
2353  case SPCC::FCC_O :
2354  case SPCC::FCC_LG :
2355  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2356  }
2357 
2358  auto PtrVT = getPointerTy(DAG.getDataLayout());
2359  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2360  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2361  ArgListTy Args;
2362  SDValue Chain = DAG.getEntryNode();
2363  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2364  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2365 
2367  CLI.setDebugLoc(DL).setChain(Chain)
2369 
2370  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2371 
2372  // result is in first, and chain is in second result.
2373  SDValue Result = CallInfo.first;
2374 
2375  switch(SPCC) {
2376  default: {
2377  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2378  SPCC = SPCC::ICC_NE;
2379  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2380  }
2381  case SPCC::FCC_UL : {
2382  SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2383  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2384  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2385  SPCC = SPCC::ICC_NE;
2386  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2387  }
2388  case SPCC::FCC_ULE: {
2389  SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2390  SPCC = SPCC::ICC_NE;
2391  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2392  }
2393  case SPCC::FCC_UG : {
2394  SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2395  SPCC = SPCC::ICC_G;
2396  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2397  }
2398  case SPCC::FCC_UGE: {
2399  SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2400  SPCC = SPCC::ICC_NE;
2401  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2402  }
2403 
2404  case SPCC::FCC_U : {
2405  SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2406  SPCC = SPCC::ICC_E;
2407  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2408  }
2409  case SPCC::FCC_O : {
2410  SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2411  SPCC = SPCC::ICC_NE;
2412  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2413  }
2414  case SPCC::FCC_LG : {
2415  SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2416  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2417  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2418  SPCC = SPCC::ICC_NE;
2419  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2420  }
2421  case SPCC::FCC_UE : {
2422  SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2423  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2424  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2425  SPCC = SPCC::ICC_E;
2426  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2427  }
2428  }
2429 }
2430 
2431 static SDValue
2433  const SparcTargetLowering &TLI) {
2434 
2435  if (Op.getOperand(0).getValueType() == MVT::f64)
2436  return TLI.LowerF128Op(Op, DAG,
2437  TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2438 
2439  if (Op.getOperand(0).getValueType() == MVT::f32)
2440  return TLI.LowerF128Op(Op, DAG,
2441  TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2442 
2443  llvm_unreachable("fpextend with non-float operand!");
2444  return SDValue();
2445 }
2446 
2447 static SDValue
2449  const SparcTargetLowering &TLI) {
2450  // FP_ROUND on f64 and f32 are legal.
2451  if (Op.getOperand(0).getValueType() != MVT::f128)
2452  return Op;
2453 
2454  if (Op.getValueType() == MVT::f64)
2455  return TLI.LowerF128Op(Op, DAG,
2456  TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2457  if (Op.getValueType() == MVT::f32)
2458  return TLI.LowerF128Op(Op, DAG,
2459  TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2460 
2461  llvm_unreachable("fpround to non-float!");
2462  return SDValue();
2463 }
2464 
2466  const SparcTargetLowering &TLI,
2467  bool hasHardQuad) {
2468  SDLoc dl(Op);
2469  EVT VT = Op.getValueType();
2470  assert(VT == MVT::i32 || VT == MVT::i64);
2471 
2472  // Expand f128 operations to fp128 abi calls.
2473  if (Op.getOperand(0).getValueType() == MVT::f128
2474  && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2475  const char *libName = TLI.getLibcallName(VT == MVT::i32
2476  ? RTLIB::FPTOSINT_F128_I32
2477  : RTLIB::FPTOSINT_F128_I64);
2478  return TLI.LowerF128Op(Op, DAG, libName, 1);
2479  }
2480 
2481  // Expand if the resulting type is illegal.
2482  if (!TLI.isTypeLegal(VT))
2483  return SDValue();
2484 
2485  // Otherwise, Convert the fp value to integer in an FP register.
2486  if (VT == MVT::i32)
2487  Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2488  else
2489  Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2490 
2491  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2492 }
2493 
2495  const SparcTargetLowering &TLI,
2496  bool hasHardQuad) {
2497  SDLoc dl(Op);
2498  EVT OpVT = Op.getOperand(0).getValueType();
2499  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2500 
2501  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2502 
2503  // Expand f128 operations to fp128 ABI calls.
2504  if (Op.getValueType() == MVT::f128
2505  && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2506  const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2507  ? RTLIB::SINTTOFP_I32_F128
2508  : RTLIB::SINTTOFP_I64_F128);
2509  return TLI.LowerF128Op(Op, DAG, libName, 1);
2510  }
2511 
2512  // Expand if the operand type is illegal.
2513  if (!TLI.isTypeLegal(OpVT))
2514  return SDValue();
2515 
2516  // Otherwise, Convert the int value to FP in an FP register.
2517  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2518  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2519  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2520 }
2521 
2523  const SparcTargetLowering &TLI,
2524  bool hasHardQuad) {
2525  SDLoc dl(Op);
2526  EVT VT = Op.getValueType();
2527 
2528  // Expand if it does not involve f128 or the target has support for
2529  // quad floating point instructions and the resulting type is legal.
2530  if (Op.getOperand(0).getValueType() != MVT::f128 ||
2531  (hasHardQuad && TLI.isTypeLegal(VT)))
2532  return SDValue();
2533 
2534  assert(VT == MVT::i32 || VT == MVT::i64);
2535 
2536  return TLI.LowerF128Op(Op, DAG,
2537  TLI.getLibcallName(VT == MVT::i32
2538  ? RTLIB::FPTOUINT_F128_I32
2539  : RTLIB::FPTOUINT_F128_I64),
2540  1);
2541 }
2542 
2544  const SparcTargetLowering &TLI,
2545  bool hasHardQuad) {
2546  SDLoc dl(Op);
2547  EVT OpVT = Op.getOperand(0).getValueType();
2548  assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2549 
2550  // Expand if it does not involve f128 or the target has support for
2551  // quad floating point instructions and the operand type is legal.
2552  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2553  return SDValue();
2554 
2555  return TLI.LowerF128Op(Op, DAG,
2556  TLI.getLibcallName(OpVT == MVT::i32
2557  ? RTLIB::UINTTOFP_I32_F128
2558  : RTLIB::UINTTOFP_I64_F128),
2559  1);
2560 }
2561 
2563  const SparcTargetLowering &TLI, bool hasHardQuad,
2564  bool isV9) {
2565  SDValue Chain = Op.getOperand(0);
2566  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2567  SDValue LHS = Op.getOperand(2);
2568  SDValue RHS = Op.getOperand(3);
2569  SDValue Dest = Op.getOperand(4);
2570  SDLoc dl(Op);
2571  unsigned Opc, SPCC = ~0U;
2572 
2573  // If this is a br_cc of a "setcc", and if the setcc got lowered into
2574  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2575  LookThroughSetCC(LHS, RHS, CC, SPCC);
2576 
2577  // Get the condition flag.
2578  SDValue CompareFlag;
2579  if (LHS.getValueType().isInteger()) {
2580  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2581  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2582  if (isV9)
2583  // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2584  Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2585  else
2586  // Non-v9 targets don't have xcc.
2587  Opc = SPISD::BRICC;
2588  } else {
2589  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2590  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2591  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2592  Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2593  } else {
2594  unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2595  CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2596  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2597  Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2598  }
2599  }
2600  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2601  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2602 }
2603 
2605  const SparcTargetLowering &TLI, bool hasHardQuad,
2606  bool isV9) {
2607  SDValue LHS = Op.getOperand(0);
2608  SDValue RHS = Op.getOperand(1);
2609  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2610  SDValue TrueVal = Op.getOperand(2);
2611  SDValue FalseVal = Op.getOperand(3);
2612  SDLoc dl(Op);
2613  unsigned Opc, SPCC = ~0U;
2614 
2615  // If this is a select_cc of a "setcc", and if the setcc got lowered into
2616  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2617  LookThroughSetCC(LHS, RHS, CC, SPCC);
2618 
2619  SDValue CompareFlag;
2620  if (LHS.getValueType().isInteger()) {
2621  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2622  Opc = LHS.getValueType() == MVT::i32 ?
2624  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2625  } else {
2626  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2627  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2628  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2629  Opc = SPISD::SELECT_ICC;
2630  } else {
2631  unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2632  CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2633  Opc = SPISD::SELECT_FCC;
2634  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2635  }
2636  }
2637  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2638  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2639 }
2640 
2642  const SparcTargetLowering &TLI) {
2643  MachineFunction &MF = DAG.getMachineFunction();
2645  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2646 
2647  // Need frame address to find the address of VarArgsFrameIndex.
2649 
2650  // vastart just stores the address of the VarArgsFrameIndex slot into the
2651  // memory location argument.
2652  SDLoc DL(Op);
2653  SDValue Offset =
2654  DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2655  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2656  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2657  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2658  MachinePointerInfo(SV));
2659 }
2660 
2662  SDNode *Node = Op.getNode();
2663  EVT VT = Node->getValueType(0);
2664  SDValue InChain = Node->getOperand(0);
2665  SDValue VAListPtr = Node->getOperand(1);
2666  EVT PtrVT = VAListPtr.getValueType();
2667  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2668  SDLoc DL(Node);
2669  SDValue VAList =
2670  DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2671  // Increment the pointer, VAList, to the next vaarg.
2672  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2673  DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2674  DL));
2675  // Store the incremented VAList to the legalized pointer.
2676  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2677  MachinePointerInfo(SV));
2678  // Load the actual argument out of the pointer VAList.
2679  // We can't count on greater alignment than the word size.
2680  return DAG.getLoad(
2681  VT, DL, InChain, VAList, MachinePointerInfo(),
2682  std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8);
2683 }
2684 
2686  const SparcSubtarget *Subtarget) {
2687  SDValue Chain = Op.getOperand(0); // Legalize the chain.
2688  SDValue Size = Op.getOperand(1); // Legalize the size.
2689  MaybeAlign Alignment =
2690  cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
2691  Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
2692  EVT VT = Size->getValueType(0);
2693  SDLoc dl(Op);
2694 
2695  // TODO: implement over-aligned alloca. (Note: also implies
2696  // supporting support for overaligned function frames + dynamic
2697  // allocations, at all, which currently isn't supported)
2698  if (Alignment && *Alignment > StackAlign) {
2699  const MachineFunction &MF = DAG.getMachineFunction();
2700  report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2701  "over-aligned dynamic alloca not supported.");
2702  }
2703 
2704  // The resultant pointer needs to be above the register spill area
2705  // at the bottom of the stack.
2706  unsigned regSpillArea;
2707  if (Subtarget->is64Bit()) {
2708  regSpillArea = 128;
2709  } else {
2710  // On Sparc32, the size of the spill area is 92. Unfortunately,
2711  // that's only 4-byte aligned, not 8-byte aligned (the stack
2712  // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2713  // aligned dynamic allocation, we actually need to add 96 to the
2714  // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2715 
2716  // That also means adding 4 to the size of the allocation --
2717  // before applying the 8-byte rounding. Unfortunately, we the
2718  // value we get here has already had rounding applied. So, we need
2719  // to add 8, instead, wasting a bit more memory.
2720 
2721  // Further, this only actually needs to be done if the required
2722  // alignment is > 4, but, we've lost that info by this point, too,
2723  // so we always apply it.
2724 
2725  // (An alternative approach would be to always reserve 96 bytes
2726  // instead of the required 92, but then we'd waste 4 extra bytes
2727  // in every frame, not just those with dynamic stack allocations)
2728 
2729  // TODO: modify code in SelectionDAGBuilder to make this less sad.
2730 
2731  Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2732  DAG.getConstant(8, dl, VT));
2733  regSpillArea = 96;
2734  }
2735 
2736  unsigned SPReg = SP::O6;
2737  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2738  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2739  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2740 
2741  regSpillArea += Subtarget->getStackPointerBias();
2742 
2743  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2744  DAG.getConstant(regSpillArea, dl, VT));
2745  SDValue Ops[2] = { NewVal, Chain };
2746  return DAG.getMergeValues(Ops, dl);
2747 }
2748 
2749 
2751  SDLoc dl(Op);
2752  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2753  dl, MVT::Other, DAG.getEntryNode());
2754  return Chain;
2755 }
2756 
2758  const SparcSubtarget *Subtarget,
2759  bool AlwaysFlush = false) {
2761  MFI.setFrameAddressIsTaken(true);
2762 
2763  EVT VT = Op.getValueType();
2764  SDLoc dl(Op);
2765  unsigned FrameReg = SP::I6;
2766  unsigned stackBias = Subtarget->getStackPointerBias();
2767 
2768  SDValue FrameAddr;
2769  SDValue Chain;
2770 
2771  // flush first to make sure the windowed registers' values are in stack
2772  Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2773 
2774  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2775 
2776  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2777 
2778  while (depth--) {
2779  SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2780  DAG.getIntPtrConstant(Offset, dl));
2781  FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2782  }
2783  if (Subtarget->is64Bit())
2784  FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2785  DAG.getIntPtrConstant(stackBias, dl));
2786  return FrameAddr;
2787 }
2788 
2789 
2791  const SparcSubtarget *Subtarget) {
2792 
2793  uint64_t depth = Op.getConstantOperandVal(0);
2794 
2795  return getFRAMEADDR(depth, Op, DAG, Subtarget);
2796 
2797 }
2798 
2800  const SparcTargetLowering &TLI,
2801  const SparcSubtarget *Subtarget) {
2802  MachineFunction &MF = DAG.getMachineFunction();
2803  MachineFrameInfo &MFI = MF.getFrameInfo();
2804  MFI.setReturnAddressIsTaken(true);
2805 
2807  return SDValue();
2808 
2809  EVT VT = Op.getValueType();
2810  SDLoc dl(Op);
2811  uint64_t depth = Op.getConstantOperandVal(0);
2812 
2813  SDValue RetAddr;
2814  if (depth == 0) {
2815  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2816  Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2817  RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2818  return RetAddr;
2819  }
2820 
2821  // Need frame address to find return address of the caller.
2822  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2823 
2824  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2825  SDValue Ptr = DAG.getNode(ISD::ADD,
2826  dl, VT,
2827  FrameAddr,
2828  DAG.getIntPtrConstant(Offset, dl));
2829  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2830 
2831  return RetAddr;
2832 }
2833 
2834 static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2835  unsigned opcode) {
2836  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2837  assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2838 
2839  // Lower fneg/fabs on f64 to fneg/fabs on f32.
2840  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2841  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2842 
2843  // Note: in little-endian, the floating-point value is stored in the
2844  // registers are in the opposite order, so the subreg with the sign
2845  // bit is the highest-numbered (odd), rather than the
2846  // lowest-numbered (even).
2847 
2848  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2849  SrcReg64);
2850  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2851  SrcReg64);
2852 
2853  if (DAG.getDataLayout().isLittleEndian())
2854  Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2855  else
2856  Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2857 
2858  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2859  dl, MVT::f64), 0);
2860  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2861  DstReg64, Hi32);
2862  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2863  DstReg64, Lo32);
2864  return DstReg64;
2865 }
2866 
2867 // Lower a f128 load into two f64 loads.
2869 {
2870  SDLoc dl(Op);
2871  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2872  assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2873 
2874  Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);
2875 
2876  SDValue Hi64 =
2877  DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2878  LdNode->getPointerInfo(), Alignment);
2879  EVT addrVT = LdNode->getBasePtr().getValueType();
2880  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2881  LdNode->getBasePtr(),
2882  DAG.getConstant(8, dl, addrVT));
2883  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2884  LdNode->getPointerInfo().getWithOffset(8),
2885  Alignment);
2886 
2887  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2888  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2889 
2890  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2891  dl, MVT::f128);
2892  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2893  MVT::f128,
2894  SDValue(InFP128, 0),
2895  Hi64,
2896  SubRegEven);
2897  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2898  MVT::f128,
2899  SDValue(InFP128, 0),
2900  Lo64,
2901  SubRegOdd);
2902  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2903  SDValue(Lo64.getNode(), 1) };
2904  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2905  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2906  return DAG.getMergeValues(Ops, dl);
2907 }
2908 
2910 {
2911  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2912 
2913  EVT MemVT = LdNode->getMemoryVT();
2914  if (MemVT == MVT::f128)
2915  return LowerF128Load(Op, DAG);
2916 
2917  return Op;
2918 }
2919 
2920 // Lower a f128 store into two f64 stores.
// NOTE(review): the signature line (original line 2921) is missing from this
// extraction; LowerSTORE below calls this as LowerF128Store(Op, DAG) —
// confirm the exact prototype against the upstream source.
2922  SDLoc dl(Op);
2923  StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
2924  assert(StNode->getOffset().isUndef() && "Unexpected node type");
2925 
  // Subregister indices naming the even (high) and odd (low) f64 halves of
  // an f128 register pair.
2926  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2927  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2928 
  // Split the stored f128 value into its two f64 halves.
2929  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2930  dl,
2931  MVT::f64,
2932  StNode->getValue(),
2933  SubRegEven);
2934  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2935  dl,
2936  MVT::f64,
2937  StNode->getValue(),
2938  SubRegOdd);
2939 
  // Each half store can rely on at most 8-byte alignment.
2940  Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);
2941 
  // Store the even half at the base address and the odd half at base+8,
  // then merge the two store chains with a TokenFactor.
2942  SDValue OutChains[2];
2943  OutChains[0] =
2944  DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2945  StNode->getBasePtr(), StNode->getPointerInfo(),
2946  Alignment);
2947  EVT addrVT = StNode->getBasePtr().getValueType();
2948  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2949  StNode->getBasePtr(),
2950  DAG.getConstant(8, dl, addrVT));
2951  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
2952  StNode->getPointerInfo().getWithOffset(8),
2953  Alignment);
2954  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2955 }
2956 
// Custom lowering for ISD::STORE. NOTE(review): the signature line (original
// 2957) is missing from this extraction; LowerOperation below calls this as
// LowerSTORE(Op, DAG). f128 stores are split into two f64 stores; i64 stores
// become a bitcast to v2i32 plus a v2i32 store; anything else returns an
// empty SDValue (no custom lowering applied).
2958 {
2959  SDLoc dl(Op);
2960  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
2961 
2962  EVT MemVT = St->getMemoryVT();
2963  if (MemVT == MVT::f128)
2964  return LowerF128Store(Op, DAG);
2965 
2966  if (MemVT == MVT::i64) {
2967  // Custom handling for i64 stores: turn it into a bitcast and a
2968  // v2i32 store.
2969  SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
2970  SDValue Chain = DAG.getStore(
2971  St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
2972  St->getOriginalAlign(), St->getMemOperand()->getFlags(),
2973  St->getAAInfo());
2974  return Chain;
2975  }
2976 
  // Empty SDValue: no custom lowering for this store type.
2977  return SDValue();
2978 }
2979 
2980 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2981  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
2982  && "invalid opcode");
2983 
2984  SDLoc dl(Op);
2985 
2986  if (Op.getValueType() == MVT::f64)
2987  return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
2988  if (Op.getValueType() != MVT::f128)
2989  return Op;
2990 
2991  // Lower fabs/fneg on f128 to fabs/fneg on f64
2992  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
2993  // (As with LowerF64Op, on little-endian, we need to negate the odd
2994  // subreg)
2995 
2996  SDValue SrcReg128 = Op.getOperand(0);
2997  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2998  SrcReg128);
2999  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3000  SrcReg128);
3001 
3002  if (DAG.getDataLayout().isLittleEndian()) {
3003  if (isV9)
3004  Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3005  else
3006  Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3007  } else {
3008  if (isV9)
3009  Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3010  else
3011  Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3012  }
3013 
3014  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3015  dl, MVT::f128), 0);
3016  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3017  DstReg128, Hi64);
3018  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3019  DstReg128, Lo64);
3020  return DstReg128;
3021 }
3022 
3024 
// NOTE(review): the signature line for this definition (original 3023) is
// missing from this extraction; LowerOperation below calls it as
// LowerADDC_ADDE_SUBC_SUBE(Op, DAG). It expands a 64-bit carry-setting /
// carry-using add or subtract into a pair of 32-bit operations glued through
// the carry flag, then reassembles the i64 result and forwards the final
// carry as the second merged value.
3025  if (Op.getValueType() != MVT::i64)
3026  return Op;
3027 
3028  SDLoc dl(Op);
  // Split each 64-bit operand into 32-bit low/high halves.
3029  SDValue Src1 = Op.getOperand(0);
3030  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
3031  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
3032  DAG.getConstant(32, dl, MVT::i64));
3033  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
3034 
3035  SDValue Src2 = Op.getOperand(1);
3036  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
3037  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
3038  DAG.getConstant(32, dl, MVT::i64));
3039  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
3040 
3041 
  // The high half always uses the carry-consuming opcode (ADDE/SUBE); only
  // incoming ADDE/SUBE nodes themselves carry a third (glue) operand.
3042  bool hasChain = false;
3043  unsigned hiOpc = Op.getOpcode();
3044  switch (Op.getOpcode()) {
3045  default: llvm_unreachable("Invalid opcode");
3046  case ISD::ADDC: hiOpc = ISD::ADDE; break;
3047  case ISD::ADDE: hasChain = true; break;
3048  case ISD::SUBC: hiOpc = ISD::SUBE; break;
3049  case ISD::SUBE: hasChain = true; break;
3050  }
3051  SDValue Lo;
3052  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
3053  if (hasChain) {
3054  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
3055  Op.getOperand(2));
3056  } else {
3057  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
3058  }
  // Feed the low half's carry (glue result) into the high half.
3059  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
3060  SDValue Carry = Hi.getValue(1);
3061 
  // Reassemble the i64 result: (zext(Hi) << 32) | zext(Lo).
3062  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
3063  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
3064  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
3065  DAG.getConstant(32, dl, MVT::i64));
3066 
3067  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
3068  SDValue Ops[2] = { Dst, Carry };
3069  return DAG.getMergeValues(Ops, dl);
3070 }
3071 
3072 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
3073 // in LegalizeDAG.cpp except the order of arguments to the library function.
// NOTE(review): the first signature line (original 3074, presumably
// "static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,") is
// missing from this extraction; only its continuation survives below.
// The i64 multiply-with-overflow is expanded into a 128-bit multiply libcall
// (RTLIB::MUL_I128); the overflow flag is derived from the top half.
3075  const SparcTargetLowering &TLI)
3076 {
3077  unsigned opcode = Op.getOpcode();
3078  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
3079 
3080  bool isSigned = (opcode == ISD::SMULO);
3081  EVT VT = MVT::i64;
3082  EVT WideVT = MVT::i128;
3083  SDLoc dl(Op);
3084  SDValue LHS = Op.getOperand(0);
3085 
  // Only the i64 form is custom-lowered here.
3086  if (LHS.getValueType() != VT)
3087  return Op;
3088 
3089  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
3090 
3091  SDValue RHS = Op.getOperand(1);
3092  SDValue HiLHS, HiRHS;
3093  if (isSigned) {
  // Widen each operand to 128 bits: high word is the replicated sign bit.
3094  HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
3095  HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
3096  } else {
  // Unsigned: zero-extend by passing zero high words.
3097  HiLHS = DAG.getConstant(0, dl, VT);
3098  HiRHS = DAG.getConstant(0, dl, MVT::i64);
3099  }
3100 
  // Note the hi,lo argument order per operand (see the comment above).
3101  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
3102 
// NOTE(review): the declaration of CallOptions (original line 3103,
// presumably "TargetLowering::MakeLibCallOptions CallOptions;") is missing
// from this extraction.
3104  CallOptions.setSExt(isSigned);
3105  SDValue MulResult = TLI.makeLibCall(DAG,
3106  RTLIB::MUL_I128, WideVT,
3107  Args, CallOptions, dl).first;
3108  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3109  MulResult, DAG.getIntPtrConstant(0, dl));
3110  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3111  MulResult, DAG.getIntPtrConstant(1, dl));
3112  if (isSigned) {
  // Signed overflow iff the top half differs from the sign-extension of
  // the bottom half.
3113  SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
3114  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
3115  } else {
  // Unsigned overflow iff the top half is nonzero.
3116  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
3117  ISD::SETNE);
3118  }
3119  // MulResult is a node with an illegal type. Because such things are not
3120  // generally permitted during this phase of legalization, ensure that
3121  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
3122  // been folded.
3123  assert(MulResult->use_empty() && "Illegally typed node still in use!");
3124 
3125  SDValue Ops[2] = { BottomHalf, TopHalf } ;
3126  return DAG.getMergeValues(Ops, dl);
3127 }
3128 
// NOTE(review): the signature line (original 3129) is missing from this
// extraction; the cross-reference residue below names this
// "static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)".
// Atomic loads/stores stronger than monotonic are expanded (with a fence);
// monotonic ones are left as-is.
3130  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3131  // Expand with a fence.
3132  return SDValue();
3133  }
3134 
3135  // Monotonic load/stores are legal.
3136  return Op;
3137 }
3138 
// Custom lowering for chainless intrinsics. NOTE(review): the first line of
// the signature (original 3139) is missing from this extraction; only the
// continuation survives below. The sole handled intrinsic is
// llvm.thread.pointer, which reads the thread pointer from %g7.
3140  SelectionDAG &DAG) const {
3141  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3142  SDLoc dl(Op);
3143  switch (IntNo) {
3144  default: return SDValue(); // Don't custom lower most intrinsics.
3145  case Intrinsic::thread_pointer: {
3146  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3147  return DAG.getRegister(SP::G7, PtrVT);
3148  }
3149  }
3150 }
3151 
// Central dispatch for all custom-lowered operations. NOTE(review): the
// signature lines (original 3152-3153, presumably
// "SDValue SparcTargetLowering::LowerOperation(SDValue Op,
//  SelectionDAG &DAG) const {") are missing from this extraction.
3154 
3155  bool hasHardQuad = Subtarget->hasHardQuad();
3156  bool isV9 = Subtarget->isV9();
3157 
3158  switch (Op.getOpcode()) {
3159  default: llvm_unreachable("Should not custom lower this!");
3160 
3161  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3162  Subtarget);
3163  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3164  Subtarget);
3165  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3166  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3167  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3168  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3169  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3170  hasHardQuad);
3171  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3172  hasHardQuad);
3173  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3174  hasHardQuad);
3175  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3176  hasHardQuad);
3177  case ISD::BR_CC:
3178  return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9);
3179  case ISD::SELECT_CC:
3180  return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9);
3181  case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3182  case ISD::VAARG: return LowerVAARG(Op, DAG);
// NOTE(review): original line 3183 is missing here — presumably
// "case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,",
// of which only the continuation "Subtarget);" survives.
3184  Subtarget);
3185 
3186  case ISD::LOAD: return LowerLOAD(Op, DAG);
3187  case ISD::STORE: return LowerSTORE(Op, DAG);
3188  case ISD::FADD: return LowerF128Op(Op, DAG,
3189  getLibcallName(RTLIB::ADD_F128), 2);
3190  case ISD::FSUB: return LowerF128Op(Op, DAG,
3191  getLibcallName(RTLIB::SUB_F128), 2);
3192  case ISD::FMUL: return LowerF128Op(Op, DAG,
3193  getLibcallName(RTLIB::MUL_F128), 2);
3194  case ISD::FDIV: return LowerF128Op(Op, DAG,
3195  getLibcallName(RTLIB::DIV_F128), 2);
3196  case ISD::FSQRT: return LowerF128Op(Op, DAG,
3197  getLibcallName(RTLIB::SQRT_F128),1);
3198  case ISD::FABS:
3199  case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3200  case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3201  case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3202  case ISD::ADDC:
3203  case ISD::ADDE:
3204  case ISD::SUBC:
3205  case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
3206  case ISD::UMULO:
3207  case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3208  case ISD::ATOMIC_LOAD:
3209  case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
// NOTE(review): original line 3210 is missing here — presumably
// "case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);".
3211  }
3212 }
3213 
// Materialize an f64 constant as a v2i32 build_vector. NOTE(review): the
// first signature line (original 3214) is missing from this extraction;
// presumably SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
// given the call in PerformBITCASTCombine below — confirm against upstream.
3215  const SDLoc &DL,
3216  SelectionDAG &DAG) const {
  // Split the 64-bit bit pattern into two 32-bit halves; lane order depends
  // on the target's endianness.
3217  APInt V = C->getValueAPF().bitcastToAPInt();
3218  SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3219  SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3220  if (DAG.getDataLayout().isLittleEndian())
3221  std::swap(Lo, Hi);
3222  return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3223 }
3224 
// DAG combine for BITCAST: fold (bitcast f64-constant to v2i32) into a
// constant build_vector. NOTE(review): the first signature line (original
// 3225) is missing from this extraction.
3226  DAGCombinerInfo &DCI) const {
3227  SDLoc dl(N);
3228  SDValue Src = N->getOperand(0);
3229 
3230  if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3231  Src.getSimpleValueType() == MVT::f64)
3232  return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3233 
  // No fold applies.
3234  return SDValue();
3235 }
3236 
// Target DAG-combine hook; currently only BITCAST nodes get extra folding.
// NOTE(review): the first signature line (original 3237) is missing from
// this extraction.
3238  DAGCombinerInfo &DCI) const {
3239  switch (N->getOpcode()) {
3240  default:
3241  break;
3242  case ISD::BITCAST:
3243  return PerformBITCASTCombine(N, DCI);
3244  }
3245  return SDValue();
3246 }
3247 
// Expand the SELECT_CC pseudo-instructions into control flow, choosing the
// branch opcode from the pseudo's condition-code class and whether the
// subtarget is V9. NOTE(review): the first signature lines (original
// 3248-3249, presumably
// "SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,") are
// missing from this extraction.
3250  MachineBasicBlock *BB) const {
3251  switch (MI.getOpcode()) {
3252  default: llvm_unreachable("Unknown SELECT_CC!");
  // Selects on the 32-bit integer condition codes.
3253  case SP::SELECT_CC_Int_ICC:
3254  case SP::SELECT_CC_FP_ICC:
3255  case SP::SELECT_CC_DFP_ICC:
3256  case SP::SELECT_CC_QFP_ICC:
3257  if (Subtarget->isV9())
3258  return expandSelectCC(MI, BB, SP::BPICC);
3259  return expandSelectCC(MI, BB, SP::BCOND);
  // Selects on the 64-bit integer condition codes (no pre-V9 fallback).
3260  case SP::SELECT_CC_Int_XCC:
3261  case SP::SELECT_CC_FP_XCC:
3262  case SP::SELECT_CC_DFP_XCC:
3263  case SP::SELECT_CC_QFP_XCC:
3264  return expandSelectCC(MI, BB, SP::BPXCC);
  // Selects on the floating-point condition codes.
3265  case SP::SELECT_CC_Int_FCC:
3266  case SP::SELECT_CC_FP_FCC:
3267  case SP::SELECT_CC_DFP_FCC:
3268  case SP::SELECT_CC_QFP_FCC:
3269  if (Subtarget->isV9())
3270  return expandSelectCC(MI, BB, SP::FBCOND_V9);
3271  return expandSelectCC(MI, BB, SP::FBCOND);
3272  }
3273 }
3274 
// Expand one SELECT_CC pseudo into a conditional-branch triangle plus a PHI.
// NOTE(review): the first signature lines (original 3275-3276, presumably
// "SparcTargetLowering::expandSelectCC(MachineInstr &MI,
//  MachineBasicBlock *BB,") are missing from this extraction.
3277  unsigned BROpcode) const {
3278  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3279  DebugLoc dl = MI.getDebugLoc();
  // Operand 3 of the pseudo holds the SPARC condition code immediate.
3280  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3281 
3282  // To "insert" a SELECT_CC instruction, we actually have to insert the
3283  // triangle control-flow pattern. The incoming instruction knows the
3284  // destination vreg to set, the condition code register to branch on, the
3285  // true/false values to select between, and the condition code for the branch.
3286  //
3287  // We produce the following control flow:
3288  // ThisMBB
3289  // | \
3290  // | IfFalseMBB
3291  // | /
3292  // SinkMBB
3293  const BasicBlock *LLVM_BB = BB->getBasicBlock();
3294  MachineFunction::iterator It = ++BB->getIterator();
3295 
3296  MachineBasicBlock *ThisMBB = BB;
3297  MachineFunction *F = BB->getParent();
3298  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3299  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3300  F->insert(It, IfFalseMBB);
3301  F->insert(It, SinkMBB);
3302 
3303  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3304  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3305  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3306  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3307 
3308  // Set the new successors for ThisMBB.
3309  ThisMBB->addSuccessor(IfFalseMBB);
3310  ThisMBB->addSuccessor(SinkMBB);
3311 
  // Conditional branch to SinkMBB on CC (true value taken on this edge).
3312  BuildMI(ThisMBB, dl, TII.get(BROpcode))
3313  .addMBB(SinkMBB)
3314  .addImm(CC);
3315 
3316  // IfFalseMBB just falls through to SinkMBB.
3317  IfFalseMBB->addSuccessor(SinkMBB);
3318 
3319  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3320  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3321  MI.getOperand(0).getReg())
3322  .addReg(MI.getOperand(1).getReg())
3323  .addMBB(ThisMBB)
3324  .addReg(MI.getOperand(2).getReg())
3325  .addMBB(IfFalseMBB);
3326 
3327  MI.eraseFromParent(); // The pseudo instruction is gone now.
3328  return SinkMBB;
3329 }
3330 
3331 //===----------------------------------------------------------------------===//
3332 // Sparc Inline Assembly Support
3333 //===----------------------------------------------------------------------===//
3334 
3335 /// getConstraintType - Given a constraint letter, return the type of
3336 /// constraint it is for this target.
// NOTE(review): the signature lines (original 3337-3338, presumably
// "SparcTargetLowering::getConstraintType(StringRef Constraint) const {")
// are missing from this extraction.
3339  if (Constraint.size() == 1) {
3340  switch (Constraint[0]) {
3341  default: break;
  // 'r', 'f', 'e' map to register classes (see
  // getRegForInlineAsmConstraint below).
3342  case 'r':
3343  case 'f':
3344  case 'e':
3345  return C_RegisterClass;
3346  case 'I': // SIMM13
3347  return C_Immediate;
3348  }
3349  }
3350 
  // Fall back to the generic handling for anything not recognized above.
3351  return TargetLowering::getConstraintType(Constraint);
3352 }
3353 
// Score how well a call operand matches an inline-asm constraint letter.
// NOTE(review): the first signature lines (original 3354-3355) are missing
// from this extraction.
3356  const char *constraint) const {
3357  ConstraintWeight weight = CW_Invalid;
3358  Value *CallOperandVal = info.CallOperandVal;
3359  // If we don't have a value, we can't do a match,
3360  // but allow it at the lowest weight.
3361  if (!CallOperandVal)
3362  return CW_Default;
3363 
3364  // Look at the constraint type.
3365  switch (*constraint) {
3366  default:
// NOTE(review): original line 3367 is missing here — presumably
// "weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);"
// (the base method is referenced in the residue below); confirm upstream.
3368  break;
3369  case 'I': // SIMM13
  // Immediates that fit a signed 13-bit field get constant weight.
3370  if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3371  if (isInt<13>(C->getSExtValue()))
3372  weight = CW_Constant;
3373  }
3374  break;
3375  }
3376  return weight;
3377 }
3378 
3379 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3380 /// vector. If it is invalid, don't add anything to Ops.
// NOTE(review): the first signature lines (original 3381-3382) are missing
// from this extraction; only the trailing parameters survive below.
3383  std::string &Constraint,
3384  std::vector<SDValue> &Ops,
3385  SelectionDAG &DAG) const {
3386  SDValue Result;
3387 
3388  // Only support length 1 constraints for now.
3389  if (Constraint.length() > 1)
3390  return;
3391 
3392  char ConstraintLetter = Constraint[0];
3393  switch (ConstraintLetter) {
3394  default: break;
3395  case 'I':
  // 'I' accepts a signed 13-bit immediate (SIMM13).
3396  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3397  if (isInt<13>(C->getSExtValue())) {
3398  Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3399  Op.getValueType());
3400  break;
3401  }
  // Out-of-range constant: add nothing, marking the operand invalid.
3402  return;
3403  }
3404  }
3405 
3406  if (Result.getNode()) {
3407  Ops.push_back(Result);
3408  return;
3409  }
  // Defer anything unhandled to the generic implementation.
3410  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3411 }
3412 
// Map an inline-asm register constraint ('r'/'f'/'e' or a brace-enclosed
// register name such as "{r5}"/"{f2}") to a (register, register-class) pair.
3413 std::pair<unsigned, const TargetRegisterClass *>
// NOTE(review): the signature line (original 3414, presumably
// "SparcTargetLowering::getRegForInlineAsmConstraint(
//  const TargetRegisterInfo *TRI,") is missing from this extraction.
3415  StringRef Constraint,
3416  MVT VT) const {
3417  if (Constraint.empty())
3418  return std::make_pair(0U, nullptr);
3419 
3420  if (Constraint.size() == 1) {
3421  switch (Constraint[0]) {
3422  case 'r':
3423  if (VT == MVT::v2i32)
3424  return std::make_pair(0U, &SP::IntPairRegClass);
3425  else if (Subtarget->is64Bit())
3426  return std::make_pair(0U, &SP::I64RegsRegClass);
3427  else
3428  return std::make_pair(0U, &SP::IntRegsRegClass);
3429  case 'f':
3430  if (VT == MVT::f32 || VT == MVT::i32)
3431  return std::make_pair(0U, &SP::FPRegsRegClass);
3432  else if (VT == MVT::f64 || VT == MVT::i64)
3433  return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3434  else if (VT == MVT::f128)
3435  return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3436  // This will generate an error message
3437  return std::make_pair(0U, nullptr);
3438  case 'e':
3439  if (VT == MVT::f32 || VT == MVT::i32)
3440  return std::make_pair(0U, &SP::FPRegsRegClass);
3441  else if (VT == MVT::f64 || VT == MVT::i64 )
3442  return std::make_pair(0U, &SP::DFPRegsRegClass);
3443  else if (VT == MVT::f128)
3444  return std::make_pair(0U, &SP::QFPRegsRegClass);
3445  // This will generate an error message
3446  return std::make_pair(0U, nullptr);
3447  }
3448  }
3449 
3450  if (Constraint.front() != '{')
3451  return std::make_pair(0U, nullptr);
3452 
3453  assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3454  StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3455  if (RegName.empty())
3456  return std::make_pair(0U, nullptr);
3457 
3458  unsigned long long RegNo;
3459  // Handle numbered register aliases.
3460  if (RegName[0] == 'r' &&
3461  getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3462  // r0-r7 -> g0-g7
3463  // r8-r15 -> o0-o7
3464  // r16-r23 -> l0-l7
3465  // r24-r31 -> i0-i7
3466  if (RegNo > 31)
3467  return std::make_pair(0U, nullptr);
  // Rewrite "{rN}" to the canonical "{g/o/l/iN}" form and recurse.
3468  const char RegTypes[] = {'g', 'o', 'l', 'i'};
3469  char RegType = RegTypes[RegNo / 8];
3470  char RegIndex = '0' + (RegNo % 8);
3471  char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3472  return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3473  }
3474 
3475  // Rewrite the fN constraint according to the value type if needed.
3476  if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3477  getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3478  if (VT == MVT::f64 && (RegNo % 2 == 0)) {
// NOTE(review): original line 3479 is missing here — presumably
// "return getRegForInlineAsmConstraint(" continuing onto the next line.
3480  TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3481  } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
// NOTE(review): original line 3482 is missing here — presumably
// "return getRegForInlineAsmConstraint(" continuing onto the next line.
3483  TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3484  } else {
3485  return std::make_pair(0U, nullptr);
3486  }
3487  }
3488 
3489  auto ResultPair =
// NOTE(review): original line 3490 is missing here — presumably
// "TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);".
3491  if (!ResultPair.second)
3492  return std::make_pair(0U, nullptr);
3493 
3494  // Force the use of I64Regs over IntRegs for 64-bit values.
3495  if (Subtarget->is64Bit() && VT == MVT::i64) {
3496  assert(ResultPair.second == &SP::IntRegsRegClass &&
3497  "Unexpected register class");
3498  return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3499  }
3500 
3501  return ResultPair;
3502 }
3503 
// Global-address + constant-offset folding is not supported on SPARC.
3504 bool
// NOTE(review): the remainder of the signature (original line 3505; the
// residue below names it
// "SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
//  const") is missing from this extraction.
3506  // The Sparc target isn't yet aware of offsets.
3507  return false;
3508 }
3509 
// Custom type legalization for illegally-typed results. NOTE(review): the
// first signature lines (original 3510-3511, presumably
// "void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
//  SmallVectorImpl<SDValue> &Results,") are missing from this extraction.
3512  SelectionDAG &DAG) const {
3513 
3514  SDLoc dl(N);
3515 
3516  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3517 
3518  switch (N->getOpcode()) {
3519  default:
3520  llvm_unreachable("Do not know how to custom type legalize this operation!");
3521 
  // f128 -> i64 conversions are expanded to libcalls.
3522  case ISD::FP_TO_SINT:
3523  case ISD::FP_TO_UINT:
3524  // Custom lower only if it involves f128 or i64.
3525  if (N->getOperand(0).getValueType() != MVT::f128
3526  || N->getValueType(0) != MVT::i64)
3527  return;
3528  libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3529  ? RTLIB::FPTOSINT_F128_I64
3530  : RTLIB::FPTOUINT_F128_I64);
3531 
3532  Results.push_back(LowerF128Op(SDValue(N, 0),
3533  DAG,
3534  getLibcallName(libCall),
3535  1));
3536  return;
  // LEON cycle counter: combine the ASR23/G0 register reads into an i64.
3537  case ISD::READCYCLECOUNTER: {
3538  assert(Subtarget->hasLeonCycleCounter());
3539  SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3540  SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3541  SDValue Ops[] = { Lo, Hi };
3542  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3543  Results.push_back(Pair);
3544  Results.push_back(N->getOperand(0));
3545  return;
3546  }
  // i64 -> f128 conversions are expanded to libcalls.
3547  case ISD::SINT_TO_FP:
3548  case ISD::UINT_TO_FP:
3549  // Custom lower only if it involves f128 or i64.
3550  if (N->getValueType(0) != MVT::f128
3551  || N->getOperand(0).getValueType() != MVT::i64)
3552  return;
3553 
3554  libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3555  ? RTLIB::SINTTOFP_I64_F128
3556  : RTLIB::UINTTOFP_I64_F128);
3557 
3558  Results.push_back(LowerF128Op(SDValue(N, 0),
3559  DAG,
3560  getLibcallName(libCall),
3561  1));
3562  return;
3563  case ISD::LOAD: {
3564  LoadSDNode *Ld = cast<LoadSDNode>(N);
3565  // Custom handling only for i64: turn i64 load into a v2i32 load,
3566  // and a bitcast.
3567  if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3568  return;
3569 
3570  SDLoc dl(N);
3571  SDValue LoadRes = DAG.getExtLoad(
3572  Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3573  Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
3574  Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
3575  Ld->getAAInfo());
3576 
3577  SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3578  Results.push_back(Res);
3579  Results.push_back(LoadRes.getValue(1));
3580  return;
3581  }
3582  }
3583 }
3584 
3585 // Override to enable LOAD_STACK_GUARD lowering on Linux.
// NOTE(review): the signature line (original 3586; the residue below names
// it SparcTargetLowering::useLoadStackGuardNode) and the non-Linux fallback
// on original line 3588 (presumably
// "return TargetLowering::useLoadStackGuardNode();") are missing from this
// extraction — confirm against the upstream source.
3587  if (!Subtarget->isTargetLinux())
3589  return true;
3590 }
3591 
3592 // Override to disable global variable loading on Linux.
// NOTE(review): the signature line (original 3593) and the body line on
// original 3595 (presumably a call into the TargetLowering base
// implementation guarded by the isTargetLinux() check) are missing from this
// extraction; only the guard and closing brace remain. Confirm the full
// definition against the upstream source before relying on it.
3594  if (!Subtarget->isTargetLinux())
3596 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::StringSwitch::Case
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
llvm::CCValAssign::getLocVT
MVT getLocVT() const
Definition: CallingConvLower.h:151
llvm::MachineRegisterInfo::addLiveIn
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Definition: MachineRegisterInfo.h:959
llvm::SparcMCExpr::VK_Sparc_TLS_IE_LO10
@ VK_Sparc_TLS_IE_LO10
Definition: SparcMCExpr.h:56
llvm::SelectionDAG::getMemcpy
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
Definition: SelectionDAG.cpp:7391
i
i
Definition: README.txt:29
llvm::ISD::SETUGE
@ SETUGE
Definition: ISDOpcodes.h:1437
llvm::SparcRegisterInfo
Definition: SparcRegisterInfo.h:22
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:156
L5
to esp esp setne al movzbw ax esp setg cl movzbw cx cmove cx cl jne LBB1_2 esp which is much esp edx eax decl edx jle L7 L5
Definition: README.txt:656
llvm::LoadSDNode::getOffset
const SDValue & getOffset() const
Definition: SelectionDAGNodes.h:2364
llvm::SPISD::GLOBAL_BASE_REG
@ GLOBAL_BASE_REG
Definition: SparcISelLowering.h:48
llvm::CCValAssign::ZExt
@ ZExt
Definition: CallingConvLower.h:36
llvm::SPISD::TLS_ADD
@ TLS_ADD
Definition: SparcISelLowering.h:53
toCallerWindow
static unsigned toCallerWindow(unsigned Reg)
Definition: SparcISelLowering.cpp:224
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
llvm::ConstantSDNode
Definition: SelectionDAGNodes.h:1582
llvm::RegisterSDNode
Definition: SelectionDAGNodes.h:2161
LowerATOMIC_LOAD_STORE
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
Definition: SparcISelLowering.cpp:3129
llvm::StoreSDNode::getBasePtr
const SDValue & getBasePtr() const
Definition: SelectionDAGNodes.h:2394
llvm::RISCVAttrs::StackAlign
StackAlign
Definition: RISCVAttributes.h:37
LowerVASTART
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
Definition: SparcISelLowering.cpp:2641
SparcRegisterInfo.h
llvm::SPCC::ICC_CS
@ ICC_CS
Definition: Sparc.h:52
llvm::SparcMCExpr::VK_Sparc_TLS_IE_ADD
@ VK_Sparc_TLS_IE_ADD
Definition: SparcMCExpr.h:59
llvm::SelectionDAG::getCALLSEQ_START
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
Definition: SelectionDAG.h:988
llvm::ISD::SETLE
@ SETLE
Definition: ISDOpcodes.h:1448
llvm::SPISD::ITOF
@ ITOF
Definition: SparcISelLowering.h:42
llvm::ISD::SETO
@ SETO
Definition: ISDOpcodes.h:1433
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:109
llvm::ISD::UMULO
@ UMULO
Definition: ISDOpcodes.h:332
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
llvm::ISD::ArgFlagsTy::isSRet
bool isSRet() const
Definition: TargetCallingConv.h:82
llvm::SparcMCExpr::VK_Sparc_L44
@ VK_Sparc_L44
Definition: SparcMCExpr.h:31
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::SDNode::getValueType
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Definition: SelectionDAGNodes.h:986
llvm::PICLevel::SmallPIC
@ SmallPIC
Definition: CodeGen.h:33
Analyze_CC_Sparc64_Half
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Definition: SparcISelLowering.cpp:154
llvm::SDLoc
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Definition: SelectionDAGNodes.h:1106
llvm::SPCC::FCC_G
@ FCC_G
Definition: Sparc.h:61
llvm::CCValAssign::Full
@ Full
Definition: CallingConvLower.h:34
llvm::TargetLoweringBase::Legal
@ Legal
Definition: TargetLowering.h:196
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::ISD::OR
@ OR
Definition: ISDOpcodes.h:667
llvm::SparcTargetLowering::LowerConstantPool
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:2139
llvm::TargetMachine::useEmulatedTLS
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
Definition: TargetMachine.cpp:146
llvm::ISD::SETGT
@ SETGT
Definition: ISDOpcodes.h:1445
llvm::ISD::BITCAST
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:886
llvm::ISD::SETNE
@ SETNE
Definition: ISDOpcodes.h:1449
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:156
llvm::TargetLowering::getSingleConstraintMatchWeight
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Definition: TargetLowering.cpp:5585
llvm::MachineFrameInfo::setReturnAddressIsTaken
void setReturnAddressIsTaken(bool s)
Definition: MachineFrameInfo.h:378
llvm::TargetLowering::ConstraintType
ConstraintType
Definition: TargetLowering.h:4569
llvm::ISD::BR_JT
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:991
PHI
Rewrite undef for PHI
Definition: AMDGPURewriteUndefForPHI.cpp:101
llvm::KnownBits::resetAll
void resetAll()
Resets the known state of all bits.
Definition: KnownBits.h:66
llvm::SparcTargetLowering::isOffsetFoldingLegal
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
Definition: SparcISelLowering.cpp:3505
llvm::TargetLowering::CallLoweringInfo::setChain
CallLoweringInfo & setChain(SDValue InChain)
Definition: TargetLowering.h:4198
llvm::SparcMCExpr::VK_Sparc_HM
@ VK_Sparc_HM
Definition: SparcMCExpr.h:33
llvm::CCState
CCState - This class holds information needed while lowering arguments and return values.
Definition: CallingConvLower.h:189
llvm::ISD::AssertSext
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:61
llvm::SelectionDAG::getCopyToReg
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:767
llvm::SPCC::FCC_LE
@ FCC_LE
Definition: Sparc.h:71
llvm::EVT::getFixedSizeInBits
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition: ValueTypes.h:348
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
llvm::SDValue::getNode
SDNode * getNode() const
get the SDNode which holds the desired result
Definition: SelectionDAGNodes.h:159
llvm::StringRef::front
char front() const
front - Get the first character in the string.
Definition: StringRef.h:140
llvm::isOneConstant
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Definition: SelectionDAG.cpp:10863
llvm::Function
Definition: Function.h:60
llvm::SPCC::ICC_LE
@ ICC_LE
Definition: Sparc.h:46
llvm::ISD::BSWAP
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:700
llvm::ISD::UDIV
@ UDIV
Definition: ISDOpcodes.h:243
is64Bit
static bool is64Bit(const char *name)
Definition: X86Disassembler.cpp:1018
llvm::MVT::i128
@ i128
Definition: MachineValueType.h:50
llvm::ISD::DYNAMIC_STACKALLOC
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:976
llvm::SelectionDAG::getValueType
SDValue getValueType(EVT)
Definition: SelectionDAG.cpp:1859
llvm::CCState::addLoc
void addLoc(const CCValAssign &V)
Definition: CallingConvLower.h:251
IntCondCCodeToICC
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC)
IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC condition.
Definition: SparcISelLowering.cpp:1478
CC_Sparc64_Full
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Definition: SparcISelLowering.cpp:191
llvm::SparcSubtarget::isV9
bool isV9() const
Definition: SparcSubtarget.h:81
llvm::ISD::ADDC
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:269
llvm::SPCC::FCC_UE
@ FCC_UE
Definition: Sparc.h:68
llvm::TLSModel::GeneralDynamic
@ GeneralDynamic
Definition: CodeGen.h:43
llvm::CodeModel::Medium
@ Medium
Definition: CodeGen.h:28
llvm::AtomicRMWInst::getOperation
BinOp getOperation() const
Definition: Instructions.h:801
llvm::TargetLoweringBase::setMinCmpXchgSizeInBits
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
Definition: TargetLowering.h:2556
llvm::SelectionDAG::getFrameIndex
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
Definition: SelectionDAG.cpp:1740
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1199
llvm::ISD::SETEQ
@ SETEQ
Definition: ISDOpcodes.h:1444
llvm::SPCC::ICC_L
@ ICC_L
Definition: Sparc.h:48
llvm::ISD::STACKRESTORE
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1057
llvm::SelectionDAG::getVTList
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
Definition: SelectionDAG.cpp:9439
llvm::SparcMCExpr::VK_Sparc_TLS_LE_LOX10
@ VK_Sparc_TLS_LE_LOX10
Definition: SparcMCExpr.h:61
llvm::SparcMachineFunctionInfo::setSRetReturnReg
void setSRetReturnReg(Register Reg)
Definition: SparcMachineFunctionInfo.h:53
ErrorHandling.h
llvm::SelectionDAG::getRoot
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
Definition: SelectionDAG.h:545
llvm::X86Disassembler::Reg
Reg
All possible values of the reg field in the ModR/M byte.
Definition: X86DisassemblerDecoder.h:462
llvm::SparcTargetLowering::getRegisterByName
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
Definition: SparcISelLowering.cpp:1114
llvm::SparcMCExpr::VK_Sparc_TLS_LDO_ADD
@ VK_Sparc_TLS_LDO_ADD
Definition: SparcMCExpr.h:54
llvm::APInt::zextOrTrunc
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:994
llvm::SparcTargetLowering::useLoadStackGuardNode
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
Definition: SparcISelLowering.cpp:3586
llvm::MemSDNode::getMemoryVT
EVT getMemoryVT() const
Return the type of the in-memory value.
Definition: SelectionDAGNodes.h:1355
llvm::MemSDNode::getChain
const SDValue & getChain() const
Definition: SelectionDAGNodes.h:1378
llvm::ISD::ANY_EXTEND
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:766
llvm::SDNode
Represents one node in the SelectionDAG.
Definition: SelectionDAGNodes.h:463
llvm::TargetLoweringBase::getLibcallName
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
Definition: TargetLowering.h:3168
llvm::TargetLowering::CW_Constant
@ CW_Constant
Definition: TargetLowering.h:4591
llvm::ISD::FMA
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:482
llvm::ISD::FP_TO_SINT
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:819
llvm::TargetLowering::DAGCombinerInfo::DAG
SelectionDAG & DAG
Definition: TargetLowering.h:3929
llvm::SparcTargetLowering::LowerINTRINSIC_WO_CHAIN
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:3139
llvm::LoadSDNode
This class is used to represent ISD::LOAD nodes.
Definition: SelectionDAGNodes.h:2344
llvm::TargetLowering::CallLoweringInfo::setCallee
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
Definition: TargetLowering.h:4217
llvm::MVT::Glue
@ Glue
Definition: MachineValueType.h:282
llvm::SparcSubtarget::getRegisterInfo
const SparcRegisterInfo * getRegisterInfo() const override
Definition: SparcSubtarget.h:68
llvm::SPISD::CMPFCC_V9
@ CMPFCC_V9
Definition: SparcISelLowering.h:28
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:236
llvm::Depth
@ Depth
Definition: SIMachineScheduler.h:36
llvm::TargetLowering::isPositionIndependent
bool isPositionIndependent() const
Definition: TargetLowering.cpp:46
llvm::SPCC::FCC_UG
@ FCC_UG
Definition: Sparc.h:62
llvm::ISD::SETULE
@ SETULE
Definition: ISDOpcodes.h:1439
llvm::CCState::AnalyzeFormalArguments
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
Definition: CallingConvLower.cpp:82
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::RTLIB::Libcall
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Definition: RuntimeLibcalls.h:30
llvm::SparcTargetLowering::LowerF128_LibCallArg
SDValue LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, SDValue Arg, const SDLoc &DL, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:2253
Module.h
llvm::SPCC::FCC_O
@ FCC_O
Definition: Sparc.h:73
llvm::ISD::SHL_PARTS
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:749
llvm::SelectionDAG::getStore
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
Definition: SelectionDAG.cpp:8099
llvm::SPCC::ICC_LEU
@ ICC_LEU
Definition: Sparc.h:50
llvm::ISD::SETCC
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:736
llvm::SparcTargetLowering::LowerReturn_32
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:253
llvm::SparcTargetLowering
Definition: SparcISelLowering.h:61
llvm::SparcSubtarget::getFrameLowering
const TargetFrameLowering * getFrameLowering() const override
Definition: SparcSubtarget.h:65
llvm::TargetLowering::CallLoweringInfo::CB
const CallBase * CB
Definition: TargetLowering.h:4180
llvm::CCValAssign::BCvt
@ BCvt
Definition: CallingConvLower.h:44
llvm::TargetLoweringBase::setMinFunctionAlignment
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
Definition: TargetLowering.h:2512
llvm::TargetLowering::LowerCallTo
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
Definition: SelectionDAGBuilder.cpp:9814
LowerF128_FPEXTEND
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
Definition: SparcISelLowering.cpp:2432
llvm::SparcMCExpr::VK_Sparc_TLS_LDM_ADD
@ VK_Sparc_TLS_LDM_ADD
Definition: SparcMCExpr.h:50
llvm::ore::NV
DiagnosticInfoOptimizationBase::Argument NV
Definition: OptimizationRemarkEmitter.h:136
llvm::tgtok::FalseVal
@ FalseVal
Definition: TGLexer.h:62
Results
Function Alias Analysis Results
Definition: AliasAnalysis.cpp:772
llvm::TargetLoweringBase::getVectorIdxTy
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
Definition: TargetLowering.h:410
fixupVariableFloatArgs
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
Definition: SparcISelLowering.cpp:1141
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
llvm::MipsISD::Ret
@ Ret
Definition: MipsISelLowering.h:119
llvm::ISD::VAEND
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1086
llvm::ISD::EXTLOAD
@ EXTLOAD
Definition: ISDOpcodes.h:1404
llvm::APInt::lshr
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:832
RHS
Value * RHS
Definition: X86PartialReduction.cpp:76
llvm::SPCC::CondCodes
CondCodes
Definition: Sparc.h:40
llvm::ISD::SETOEQ
@ SETOEQ
Definition: ISDOpcodes.h:1427
llvm::BlockAddressSDNode
Definition: SelectionDAGNodes.h:2195
llvm::SparcMCExpr::VK_Sparc_TLS_LDM_HI22
@ VK_Sparc_TLS_LDM_HI22
Definition: SparcMCExpr.h:48
SelectionDAG.h
llvm::SPII::Load
@ Load
Definition: SparcInstrInfo.h:32
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1628
SparcISelLowering.h
llvm::SparcTargetLowering::EmitInstrWithCustomInserter
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
Definition: SparcISelLowering.cpp:3249
llvm::SPISD::FIRST_NUMBER
@ FIRST_NUMBER
Definition: SparcISelLowering.h:25
llvm::Type::getInt32Ty
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:238
llvm::ISD::SETUEQ
@ SETUEQ
Definition: ISDOpcodes.h:1435
llvm::CCState::AnalyzeCallOperands
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
Definition: CallingConvLower.cpp:123
llvm::SelectionDAG::getContext
LLVMContext * getContext() const
Definition: SelectionDAG.h:479
llvm::ISD::FABS
@ FABS
Definition: ISDOpcodes.h:912
llvm::SparcMCExpr::VK_Sparc_M44
@ VK_Sparc_M44
Definition: SparcMCExpr.h:30
llvm::commonAlignment
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:213
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::ISD::BRCOND
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1001
MachineRegisterInfo.h
KnownBits.h
getFRAMEADDR
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget, bool AlwaysFlush=false)
Definition: SparcISelLowering.cpp:2757
llvm::SPISD::BRFCC
@ BRFCC
Definition: SparcISelLowering.h:32
llvm::SelectionDAG::getRegister
SDValue getRegister(unsigned Reg, EVT VT)
Definition: SelectionDAG.cpp:2122
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:55
llvm::SparcMCExpr::VK_Sparc_TLS_GD_CALL
@ VK_Sparc_TLS_GD_CALL
Definition: SparcMCExpr.h:47
SparcTargetObjectFile.h
LookThroughSetCC
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
Definition: SparcISelLowering.cpp:2018
CC_Sparc_Assign_SRet
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Definition: SparcISelLowering.cpp:41
llvm::ISD::BRIND
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:987
llvm::ISD::ROTL
@ ROTL
Definition: ISDOpcodes.h:694
llvm::SPCC::FCC_ULE
@ FCC_ULE
Definition: Sparc.h:72
Context
LLVMContext & Context
Definition: NVVMIntrRange.cpp:66
llvm::TargetLoweringBase::setTargetDAGCombine
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Definition: TargetLowering.h:2504
llvm::SPISD::FLUSHW
@ FLUSHW
Definition: SparcISelLowering.h:49
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:187
llvm::MVT::integer_valuetypes
static auto integer_valuetypes()
Definition: MachineValueType.h:1519
llvm::MachineBasicBlock::addSuccessor
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
Definition: MachineBasicBlock.cpp:767
llvm::CallInfo
Definition: GVNHoist.cpp:217
CC_Sparc_Assign_Split_64
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Definition: SparcISelLowering.cpp:54
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::SparcTargetLowering::CanLowerReturn
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
Definition: SparcISelLowering.cpp:232
llvm::SPCC::FCC_LG
@ FCC_LG
Definition: Sparc.h:65
LHS
Value * LHS
Definition: X86PartialReduction.cpp:75
llvm::ISD::BR_CC
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1008
LowerF128_FPROUND
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
Definition: SparcISelLowering.cpp:2448
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
llvm::SelectionDAG::getLoad
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
Definition: SelectionDAG.cpp:8049
llvm::SparcMCExpr::VK_Sparc_TLS_LE_HIX22
@ VK_Sparc_TLS_LE_HIX22
Definition: SparcMCExpr.h:60
llvm::MVT::i1
@ i1
Definition: MachineValueType.h:43
llvm::SDNode::getOpcode
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
Definition: SelectionDAGNodes.h:644
llvm::TargetLowering::CallLoweringInfo::IsVarArg
bool IsVarArg
Definition: TargetLowering.h:4