LLVM  13.0.0git
SparcISelLowering.cpp
Go to the documentation of this file.
1 //===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the interfaces that Sparc uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SparcISelLowering.h"
17 #include "SparcRegisterInfo.h"
18 #include "SparcTargetMachine.h"
19 #include "SparcTargetObjectFile.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringSwitch.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/Module.h"
33 #include "llvm/Support/KnownBits.h"
34 using namespace llvm;
35 
36 
37 //===----------------------------------------------------------------------===//
38 // Calling Convention Implementation
39 //===----------------------------------------------------------------------===//
40 
41 static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
42  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
43  ISD::ArgFlagsTy &ArgFlags, CCState &State)
44 {
45  assert (ArgFlags.isSRet());
46 
47  // Assign SRet argument.
48  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
49  0,
50  LocVT, LocInfo));
51  return true;
52 }
53 
54 static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
55  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
56  ISD::ArgFlagsTy &ArgFlags, CCState &State)
57 {
58  static const MCPhysReg RegList[] = {
59  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
60  };
61  // Try to get first reg.
62  if (Register Reg = State.AllocateReg(RegList)) {
63  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
64  } else {
65  // Assign whole thing in stack.
67  ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
68  return true;
69  }
70 
71  // Try to get second reg.
72  if (Register Reg = State.AllocateReg(RegList))
73  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
74  else
76  ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
77  return true;
78 }
79 
80 static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
81  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
82  ISD::ArgFlagsTy &ArgFlags, CCState &State)
83 {
84  static const MCPhysReg RegList[] = {
85  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
86  };
87 
88  // Try to get first reg.
89  if (Register Reg = State.AllocateReg(RegList))
90  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
91  else
92  return false;
93 
94  // Try to get second reg.
95  if (Register Reg = State.AllocateReg(RegList))
96  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
97  else
98  return false;
99 
100  return true;
101 }
102 
103 // Allocate a full-sized argument for the 64-bit ABI.
104 static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
105  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
106  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
107  assert((LocVT == MVT::f32 || LocVT == MVT::f128
108  || LocVT.getSizeInBits() == 64) &&
109  "Can't handle non-64 bits locations");
110 
111  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
112  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
113  Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
114  unsigned Offset = State.AllocateStack(size, alignment);
115  unsigned Reg = 0;
116 
117  if (LocVT == MVT::i64 && Offset < 6*8)
118  // Promote integers to %i0-%i5.
119  Reg = SP::I0 + Offset/8;
120  else if (LocVT == MVT::f64 && Offset < 16*8)
121  // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
122  Reg = SP::D0 + Offset/8;
123  else if (LocVT == MVT::f32 && Offset < 16*8)
124  // Promote floats to %f1, %f3, ...
125  Reg = SP::F1 + Offset/4;
126  else if (LocVT == MVT::f128 && Offset < 16*8)
127  // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
128  Reg = SP::Q0 + Offset/16;
129 
130  // Promote to register when possible, otherwise use the stack slot.
131  if (Reg) {
132  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
133  return true;
134  }
135 
136  // This argument goes on the stack in an 8-byte slot.
137  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
138  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
139  if (LocVT == MVT::f32)
140  Offset += 4;
141 
142  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
143  return true;
144 }
145 
146 // Allocate a half-sized argument for the 64-bit ABI.
147 //
148 // This is used when passing { float, int } structs by value in registers.
149 static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
150  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
151  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
152  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
153  unsigned Offset = State.AllocateStack(4, Align(4));
154 
155  if (LocVT == MVT::f32 && Offset < 16*8) {
156  // Promote floats to %f0-%f31.
157  State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
158  LocVT, LocInfo));
159  return true;
160  }
161 
162  if (LocVT == MVT::i32 && Offset < 6*8) {
163  // Promote integers to %i0-%i5, using half the register.
164  unsigned Reg = SP::I0 + Offset/8;
165  LocVT = MVT::i64;
166  LocInfo = CCValAssign::AExt;
167 
168  // Set the Custom bit if this i32 goes in the high bits of a register.
169  if (Offset % 8 == 0)
170  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
171  LocVT, LocInfo));
172  else
173  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
174  return true;
175  }
176 
177  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
178  return true;
179 }
180 
181 #include "SparcGenCallingConv.inc"
182 
183 // The calling conventions in SparcCallingConv.td are described in terms of the
184 // callee's register window. This function translates registers to the
185 // corresponding caller window %o register.
186 static unsigned toCallerWindow(unsigned Reg) {
187  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
188  "Unexpected enum");
189  if (Reg >= SP::I0 && Reg <= SP::I7)
190  return Reg - SP::I0 + SP::O0;
191  return Reg;
192 }
193 
194 SDValue
196  bool IsVarArg,
198  const SmallVectorImpl<SDValue> &OutVals,
199  const SDLoc &DL, SelectionDAG &DAG) const {
200  if (Subtarget->is64Bit())
201  return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
202  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
203 }
204 
205 SDValue
207  bool IsVarArg,
209  const SmallVectorImpl<SDValue> &OutVals,
210  const SDLoc &DL, SelectionDAG &DAG) const {
212 
213  // CCValAssign - represent the assignment of the return value to locations.
215 
216  // CCState - Info about the registers and stack slot.
217  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
218  *DAG.getContext());
219 
220  // Analyze return values.
221  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
222 
223  SDValue Flag;
224  SmallVector<SDValue, 4> RetOps(1, Chain);
225  // Make room for the return address offset.
226  RetOps.push_back(SDValue());
227 
228  // Copy the result values into the output registers.
229  for (unsigned i = 0, realRVLocIdx = 0;
230  i != RVLocs.size();
231  ++i, ++realRVLocIdx) {
232  CCValAssign &VA = RVLocs[i];
233  assert(VA.isRegLoc() && "Can only return in registers!");
234 
235  SDValue Arg = OutVals[realRVLocIdx];
236 
237  if (VA.needsCustom()) {
238  assert(VA.getLocVT() == MVT::v2i32);
239  // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
240  // happen by default if this wasn't a legal type)
241 
243  Arg,
244  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
246  Arg,
247  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
248 
249  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
250  Flag = Chain.getValue(1);
251  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
252  VA = RVLocs[++i]; // skip ahead to next loc
253  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
254  Flag);
255  } else
256  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
257 
258  // Guarantee that all emitted copies are stuck together with flags.
259  Flag = Chain.getValue(1);
260  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
261  }
262 
263  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
264  // If the function returns a struct, copy the SRetReturnReg to I0
265  if (MF.getFunction().hasStructRetAttr()) {
267  Register Reg = SFI->getSRetReturnReg();
268  if (!Reg)
269  llvm_unreachable("sret virtual register not created in the entry block");
270  auto PtrVT = getPointerTy(DAG.getDataLayout());
271  SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
272  Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
273  Flag = Chain.getValue(1);
274  RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
275  RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
276  }
277 
278  RetOps[0] = Chain; // Update chain.
279  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
280 
281  // Add the flag if we have it.
282  if (Flag.getNode())
283  RetOps.push_back(Flag);
284 
285  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
286 }
287 
288 // Lower return values for the 64-bit ABI.
289 // Return values are passed the exactly the same way as function arguments.
290 SDValue
292  bool IsVarArg,
294  const SmallVectorImpl<SDValue> &OutVals,
295  const SDLoc &DL, SelectionDAG &DAG) const {
296  // CCValAssign - represent the assignment of the return value to locations.
298 
299  // CCState - Info about the registers and stack slot.
300  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
301  *DAG.getContext());
302 
303  // Analyze return values.
304  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
305 
306  SDValue Flag;
307  SmallVector<SDValue, 4> RetOps(1, Chain);
308 
309  // The second operand on the return instruction is the return address offset.
310  // The return address is always %i7+8 with the 64-bit ABI.
311  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
312 
313  // Copy the result values into the output registers.
314  for (unsigned i = 0; i != RVLocs.size(); ++i) {
315  CCValAssign &VA = RVLocs[i];
316  assert(VA.isRegLoc() && "Can only return in registers!");
317  SDValue OutVal = OutVals[i];
318 
319  // Integer return values must be sign or zero extended by the callee.
320  switch (VA.getLocInfo()) {
321  case CCValAssign::Full: break;
322  case CCValAssign::SExt:
323  OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
324  break;
325  case CCValAssign::ZExt:
326  OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
327  break;
328  case CCValAssign::AExt:
329  OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
330  break;
331  default:
332  llvm_unreachable("Unknown loc info!");
333  }
334 
335  // The custom bit on an i32 return value indicates that it should be passed
336  // in the high bits of the register.
337  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
338  OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
339  DAG.getConstant(32, DL, MVT::i32));
340 
341  // The next value may go in the low bits of the same register.
342  // Handle both at once.
343  if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
344  SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
345  OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
346  // Skip the next value, it's already done.
347  ++i;
348  }
349  }
350 
351  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
352 
353  // Guarantee that all emitted copies are stuck together with flags.
354  Flag = Chain.getValue(1);
355  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
356  }
357 
358  RetOps[0] = Chain; // Update chain.
359 
360  // Add the flag if we have it.
361  if (Flag.getNode())
362  RetOps.push_back(Flag);
363 
364  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
365 }
366 
368  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
370  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
371  if (Subtarget->is64Bit())
372  return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
373  DL, DAG, InVals);
374  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
375  DL, DAG, InVals);
376 }
377 
378 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
379 /// passed in either one or two GPRs, including FP values. TODO: we should
380 /// pass FP values in FP registers for fastcc functions.
382  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
383  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
384  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
386  MachineRegisterInfo &RegInfo = MF.getRegInfo();
388 
389  // Assign locations to all of the incoming arguments.
391  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
392  *DAG.getContext());
393  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
394 
395  const unsigned StackOffset = 92;
396  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
397 
398  unsigned InIdx = 0;
399  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
400  CCValAssign &VA = ArgLocs[i];
401 
402  if (Ins[InIdx].Flags.isSRet()) {
403  if (InIdx != 0)
404  report_fatal_error("sparc only supports sret on the first parameter");
405  // Get SRet from [%fp+64].
406  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
407  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
408  SDValue Arg =
409  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
410  InVals.push_back(Arg);
411  continue;
412  }
413 
414  if (VA.isRegLoc()) {
415  if (VA.needsCustom()) {
416  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
417 
418  Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
419  MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
420  SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
421 
422  assert(i+1 < e);
423  CCValAssign &NextVA = ArgLocs[++i];
424 
425  SDValue LoVal;
426  if (NextVA.isMemLoc()) {
427  int FrameIdx = MF.getFrameInfo().
428  CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
429  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
430  LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
431  } else {
432  Register loReg = MF.addLiveIn(NextVA.getLocReg(),
433  &SP::IntRegsRegClass);
434  LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
435  }
436 
437  if (IsLittleEndian)
438  std::swap(LoVal, HiVal);
439 
440  SDValue WholeValue =
441  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
442  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
443  InVals.push_back(WholeValue);
444  continue;
445  }
446  Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
447  MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
448  SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
449  if (VA.getLocVT() == MVT::f32)
450  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
451  else if (VA.getLocVT() != MVT::i32) {
452  Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
453  DAG.getValueType(VA.getLocVT()));
454  Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
455  }
456  InVals.push_back(Arg);
457  continue;
458  }
459 
460  assert(VA.isMemLoc());
461 
462  unsigned Offset = VA.getLocMemOffset()+StackOffset;
463  auto PtrVT = getPointerTy(DAG.getDataLayout());
464 
465  if (VA.needsCustom()) {
466  assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
467  // If it is double-word aligned, just load.
468  if (Offset % 8 == 0) {
469  int FI = MF.getFrameInfo().CreateFixedObject(8,
470  Offset,
471  true);
472  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
473  SDValue Load =
474  DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
475  InVals.push_back(Load);
476  continue;
477  }
478 
479  int FI = MF.getFrameInfo().CreateFixedObject(4,
480  Offset,
481  true);
482  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
483  SDValue HiVal =
484  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
485  int FI2 = MF.getFrameInfo().CreateFixedObject(4,
486  Offset+4,
487  true);
488  SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
489 
490  SDValue LoVal =
491  DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
492 
493  if (IsLittleEndian)
494  std::swap(LoVal, HiVal);
495 
496  SDValue WholeValue =
497  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
498  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
499  InVals.push_back(WholeValue);
500  continue;
501  }
502 
503  int FI = MF.getFrameInfo().CreateFixedObject(4,
504  Offset,
505  true);
506  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
507  SDValue Load ;
508  if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
509  Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
510  } else if (VA.getValVT() == MVT::f128) {
511  report_fatal_error("SPARCv8 does not handle f128 in calls; "
512  "pass indirectly");
513  } else {
514  // We shouldn't see any other value types here.
515  llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
516  }
517  InVals.push_back(Load);
518  }
519 
520  if (MF.getFunction().hasStructRetAttr()) {
521  // Copy the SRet Argument to SRetReturnReg.
523  Register Reg = SFI->getSRetReturnReg();
524  if (!Reg) {
525  Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
526  SFI->setSRetReturnReg(Reg);
527  }
528  SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
529  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
530  }
531 
532  // Store remaining ArgRegs to the stack if this is a varargs function.
533  if (isVarArg) {
534  static const MCPhysReg ArgRegs[] = {
535  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
536  };
537  unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
538  const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
539  unsigned ArgOffset = CCInfo.getNextStackOffset();
540  if (NumAllocated == 6)
541  ArgOffset += StackOffset;
542  else {
543  assert(!ArgOffset);
544  ArgOffset = 68+4*NumAllocated;
545  }
546 
547  // Remember the vararg offset for the va_start implementation.
548  FuncInfo->setVarArgsFrameOffset(ArgOffset);
549 
550  std::vector<SDValue> OutChains;
551 
552  for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
553  Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
554  MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
555  SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
556 
557  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
558  true);
559  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
560 
561  OutChains.push_back(
562  DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
563  ArgOffset += 4;
564  }
565 
566  if (!OutChains.empty()) {
567  OutChains.push_back(Chain);
568  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
569  }
570  }
571 
572  return Chain;
573 }
574 
575 // Lower formal arguments for the 64 bit ABI.
577  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
579  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
581 
582  // Analyze arguments according to CC_Sparc64.
584  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
585  *DAG.getContext());
586  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
587 
588  // The argument array begins at %fp+BIAS+128, after the register save area.
589  const unsigned ArgArea = 128;
590 
591  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
592  CCValAssign &VA = ArgLocs[i];
593  if (VA.isRegLoc()) {
594  // This argument is passed in a register.
595  // All integer register arguments are promoted by the caller to i64.
596 
597  // Create a virtual register for the promoted live-in value.
598  Register VReg = MF.addLiveIn(VA.getLocReg(),
599  getRegClassFor(VA.getLocVT()));
600  SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
601 
602  // Get the high bits for i32 struct elements.
603  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
604  Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
605  DAG.getConstant(32, DL, MVT::i32));
606 
607  // The caller promoted the argument, so insert an Assert?ext SDNode so we
608  // won't promote the value again in this function.
609  switch (VA.getLocInfo()) {
610  case CCValAssign::SExt:
611  Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
612  DAG.getValueType(VA.getValVT()));
613  break;
614  case CCValAssign::ZExt:
615  Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
616  DAG.getValueType(VA.getValVT()));
617  break;
618  default:
619  break;
620  }
621 
622  // Truncate the register down to the argument type.
623  if (VA.isExtInLoc())
624  Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
625 
626  InVals.push_back(Arg);
627  continue;
628  }
629 
630  // The registers are exhausted. This argument was passed on the stack.
631  assert(VA.isMemLoc());
632  // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
633  // beginning of the arguments area at %fp+BIAS+128.
634  unsigned Offset = VA.getLocMemOffset() + ArgArea;
635  unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
636  // Adjust offset for extended arguments, SPARC is big-endian.
637  // The caller will have written the full slot with extended bytes, but we
638  // prefer our own extending loads.
639  if (VA.isExtInLoc())
640  Offset += 8 - ValSize;
641  int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
642  InVals.push_back(
643  DAG.getLoad(VA.getValVT(), DL, Chain,
646  }
647 
648  if (!IsVarArg)
649  return Chain;
650 
651  // This function takes variable arguments, some of which may have been passed
652  // in registers %i0-%i5. Variable floating point arguments are never passed
653  // in floating point registers. They go on %i0-%i5 or on the stack like
654  // integer arguments.
655  //
656  // The va_start intrinsic needs to know the offset to the first variable
657  // argument.
658  unsigned ArgOffset = CCInfo.getNextStackOffset();
660  // Skip the 128 bytes of register save area.
661  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
662  Subtarget->getStackPointerBias());
663 
664  // Save the variable arguments that were passed in registers.
665  // The caller is required to reserve stack space for 6 arguments regardless
666  // of how many arguments were actually passed.
667  SmallVector<SDValue, 8> OutChains;
668  for (; ArgOffset < 6*8; ArgOffset += 8) {
669  Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
670  SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
671  int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
672  auto PtrVT = getPointerTy(MF.getDataLayout());
673  OutChains.push_back(
674  DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
676  }
677 
678  if (!OutChains.empty())
679  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
680 
681  return Chain;
682 }
683 
684 SDValue
686  SmallVectorImpl<SDValue> &InVals) const {
687  if (Subtarget->is64Bit())
688  return LowerCall_64(CLI, InVals);
689  return LowerCall_32(CLI, InVals);
690 }
691 
692 static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
693  const CallBase *Call) {
694  if (Call)
695  return Call->hasFnAttr(Attribute::ReturnsTwice);
696 
697  const Function *CalleeFn = nullptr;
698  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
699  CalleeFn = dyn_cast<Function>(G->getGlobal());
700  } else if (ExternalSymbolSDNode *E =
701  dyn_cast<ExternalSymbolSDNode>(Callee)) {
702  const Function &Fn = DAG.getMachineFunction().getFunction();
703  const Module *M = Fn.getParent();
704  const char *CalleeName = E->getSymbol();
705  CalleeFn = M->getFunction(CalleeName);
706  }
707 
708  if (!CalleeFn)
709  return false;
710  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
711 }
712 
713 // Lower a call for the 32-bit ABI.
714 SDValue
716  SmallVectorImpl<SDValue> &InVals) const {
717  SelectionDAG &DAG = CLI.DAG;
718  SDLoc &dl = CLI.DL;
720  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
722  SDValue Chain = CLI.Chain;
723  SDValue Callee = CLI.Callee;
724  bool &isTailCall = CLI.IsTailCall;
725  CallingConv::ID CallConv = CLI.CallConv;
726  bool isVarArg = CLI.IsVarArg;
727 
728  // Sparc target does not yet support tail call optimization.
729  isTailCall = false;
730 
731  // Analyze operands of the call, assigning locations to each operand.
733  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
734  *DAG.getContext());
735  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
736 
737  // Get the size of the outgoing arguments stack space requirement.
738  unsigned ArgsSize = CCInfo.getNextStackOffset();
739 
740  // Keep stack frames 8-byte aligned.
741  ArgsSize = (ArgsSize+7) & ~7;
742 
744 
745  // Create local copies for byval args.
746  SmallVector<SDValue, 8> ByValArgs;
747  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
748  ISD::ArgFlagsTy Flags = Outs[i].Flags;
749  if (!Flags.isByVal())
750  continue;
751 
752  SDValue Arg = OutVals[i];
753  unsigned Size = Flags.getByValSize();
754  Align Alignment = Flags.getNonZeroByValAlign();
755 
756  if (Size > 0U) {
757  int FI = MFI.CreateStackObject(Size, Alignment, false);
758  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
759  SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
760 
761  Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
762  false, // isVolatile,
763  (Size <= 32), // AlwaysInline if size <= 32,
764  false, // isTailCall
766  ByValArgs.push_back(FIPtr);
767  }
768  else {
769  SDValue nullVal;
770  ByValArgs.push_back(nullVal);
771  }
772  }
773 
774  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
775 
777  SmallVector<SDValue, 8> MemOpChains;
778 
779  const unsigned StackOffset = 92;
780  bool hasStructRetAttr = false;
781  unsigned SRetArgSize = 0;
782  // Walk the register/memloc assignments, inserting copies/loads.
783  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
784  i != e;
785  ++i, ++realArgIdx) {
786  CCValAssign &VA = ArgLocs[i];
787  SDValue Arg = OutVals[realArgIdx];
788 
789  ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
790 
791  // Use local copy if it is a byval arg.
792  if (Flags.isByVal()) {
793  Arg = ByValArgs[byvalArgIdx++];
794  if (!Arg) {
795  continue;
796  }
797  }
798 
799  // Promote the value if needed.
800  switch (VA.getLocInfo()) {
801  default: llvm_unreachable("Unknown loc info!");
802  case CCValAssign::Full: break;
803  case CCValAssign::SExt:
804  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
805  break;
806  case CCValAssign::ZExt:
807  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
808  break;
809  case CCValAssign::AExt:
810  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
811  break;
812  case CCValAssign::BCvt:
813  Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
814  break;
815  }
816 
817  if (Flags.isSRet()) {
818  assert(VA.needsCustom());
819  // store SRet argument in %sp+64
820  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
821  SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
822  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
823  MemOpChains.push_back(
824  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
825  hasStructRetAttr = true;
826  // sret only allowed on first argument
827  assert(Outs[realArgIdx].OrigArgIndex == 0);
828  PointerType *Ty = cast<PointerType>(CLI.getArgs()[0].Ty);
829  Type *ElementTy = Ty->getElementType();
830  SRetArgSize = DAG.getDataLayout().getTypeAllocSize(ElementTy);
831  continue;
832  }
833 
834  if (VA.needsCustom()) {
835  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
836 
837  if (VA.isMemLoc()) {
838  unsigned Offset = VA.getLocMemOffset() + StackOffset;
839  // if it is double-word aligned, just store.
840  if (Offset % 8 == 0) {
841  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
842  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
843  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
844  MemOpChains.push_back(
845  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
846  continue;
847  }
848  }
849 
850  if (VA.getLocVT() == MVT::f64) {
851  // Move from the float value from float registers into the
852  // integer registers.
853  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
854  Arg = bitcastConstantFPToInt(C, dl, DAG);
855  else
856  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
857  }
858 
860  Arg,
861  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
863  Arg,
864  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
865 
866  if (VA.isRegLoc()) {
867  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
868  assert(i+1 != e);
869  CCValAssign &NextVA = ArgLocs[++i];
870  if (NextVA.isRegLoc()) {
871  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
872  } else {
873  // Store the second part in stack.
874  unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
875  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
876  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
877  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
878  MemOpChains.push_back(
879  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
880  }
881  } else {
882  unsigned Offset = VA.getLocMemOffset() + StackOffset;
883  // Store the first part.
884  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
885  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
886  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
887  MemOpChains.push_back(
888  DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
889  // Store the second part.
890  PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
891  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
892  MemOpChains.push_back(
893  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
894  }
895  continue;
896  }
897 
898  // Arguments that can be passed on register must be kept at
899  // RegsToPass vector
900  if (VA.isRegLoc()) {
901  if (VA.getLocVT() != MVT::f32) {
902  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
903  continue;
904  }
905  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
906  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
907  continue;
908  }
909 
910  assert(VA.isMemLoc());
911 
912  // Create a store off the stack pointer for this argument.
913  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
915  dl);
916  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
917  MemOpChains.push_back(
918  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
919  }
920 
921 
922  // Emit all stores, make sure the occur before any copies into physregs.
923  if (!MemOpChains.empty())
924  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
925 
926  // Build a sequence of copy-to-reg nodes chained together with token
927  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
929  // stuck together.
930  SDValue InFlag;
931  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
932  Register Reg = toCallerWindow(RegsToPass[i].first);
933  Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
934  InFlag = Chain.getValue(1);
935  }
936 
937  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
938 
939  // If the callee is a GlobalAddress node (quite common, every direct call is)
940  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
941  // Likewise ExternalSymbol -> TargetExternalSymbol.
944  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
945  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
946  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
947  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
948 
949  // Returns a chain & a flag for retval copy to use
950  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
952  Ops.push_back(Chain);
953  Ops.push_back(Callee);
954  if (hasStructRetAttr)
955  Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
956  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
957  Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
958  RegsToPass[i].second.getValueType()));
959 
960  // Add a register mask operand representing the call-preserved registers.
961  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
962  const uint32_t *Mask =
963  ((hasReturnsTwice)
964  ? TRI->getRTCallPreservedMask(CallConv)
965  : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
966  assert(Mask && "Missing call preserved mask for calling convention");
967  Ops.push_back(DAG.getRegisterMask(Mask));
968 
969  if (InFlag.getNode())
970  Ops.push_back(InFlag);
971 
972  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
973  InFlag = Chain.getValue(1);
974 
975  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
976  DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
977  InFlag = Chain.getValue(1);
978 
979  // Assign locations to each value returned by this call.
981  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
982  *DAG.getContext());
983 
984  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
985 
986  // Copy all of the result registers out of their specified physreg.
987  for (unsigned i = 0; i != RVLocs.size(); ++i) {
988  if (RVLocs[i].getLocVT() == MVT::v2i32) {
989  SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
990  SDValue Lo = DAG.getCopyFromReg(
991  Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
992  Chain = Lo.getValue(1);
993  InFlag = Lo.getValue(2);
994  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
995  DAG.getConstant(0, dl, MVT::i32));
996  SDValue Hi = DAG.getCopyFromReg(
997  Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
998  Chain = Hi.getValue(1);
999  InFlag = Hi.getValue(2);
1000  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1001  DAG.getConstant(1, dl, MVT::i32));
1002  InVals.push_back(Vec);
1003  } else {
1004  Chain =
1005  DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1006  RVLocs[i].getValVT(), InFlag)
1007  .getValue(1);
1008  InFlag = Chain.getValue(2);
1009  InVals.push_back(Chain.getValue(0));
1010  }
1011  }
1012 
1013  return Chain;
1014 }
1015 
1016 // FIXME? Maybe this could be a TableGen attribute on some registers and
1017 // this table could be generated automatically from RegInfo.
1019  const MachineFunction &MF) const {
1021  .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1022  .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1023  .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1024  .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1025  .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1026  .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1027  .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1028  .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1029  .Default(0);
1030 
1031  if (Reg)
1032  return Reg;
1033 
1034  report_fatal_error("Invalid register name global variable");
1035 }
1036 
1037 // Fixup floating point arguments in the ... part of a varargs call.
1038 //
1039 // The SPARC v9 ABI requires that floating point arguments are treated the same
1040 // as integers when calling a varargs function. This does not apply to the
1041 // fixed arguments that are part of the function's prototype.
1042 //
1043 // This function post-processes a CCValAssign array created by
1044 // AnalyzeCallOperands().
1046  ArrayRef<ISD::OutputArg> Outs) {
1047  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1048  const CCValAssign &VA = ArgLocs[i];
1049  MVT ValTy = VA.getLocVT();
1050  // FIXME: What about f32 arguments? C promotes them to f64 when calling
1051  // varargs functions.
1052  if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1053  continue;
1054  // The fixed arguments to a varargs function still go in FP registers.
1055  if (Outs[VA.getValNo()].IsFixed)
1056  continue;
1057 
1058  // This floating point argument should be reassigned.
1059  CCValAssign NewVA;
1060 
1061  // Determine the offset into the argument array.
1062  Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1063  unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1064  unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1065  assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1066 
1067  if (Offset < 6*8) {
1068  // This argument should go in %i0-%i5.
1069  unsigned IReg = SP::I0 + Offset/8;
1070  if (ValTy == MVT::f64)
1071  // Full register, just bitconvert into i64.
1072  NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
1073  IReg, MVT::i64, CCValAssign::BCvt);
1074  else {
1075  assert(ValTy == MVT::f128 && "Unexpected type!");
1076  // Full register, just bitconvert into i128 -- We will lower this into
1077  // two i64s in LowerCall_64.
1078  NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
1079  IReg, MVT::i128, CCValAssign::BCvt);
1080  }
1081  } else {
1082  // This needs to go to memory, we're out of integer registers.
1083  NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
1084  Offset, VA.getLocVT(), VA.getLocInfo());
1085  }
1086  ArgLocs[i] = NewVA;
1087  }
1088 }
1089 
1090 // Lower a call for the 64-bit ABI.
1091 SDValue
1093  SmallVectorImpl<SDValue> &InVals) const {
1094  SelectionDAG &DAG = CLI.DAG;
1095  SDLoc DL = CLI.DL;
1096  SDValue Chain = CLI.Chain;
1097  auto PtrVT = getPointerTy(DAG.getDataLayout());
1098 
1099  // Sparc target does not yet support tail call optimization.
1100  CLI.IsTailCall = false;
1101 
1102  // Analyze operands of the call, assigning locations to each operand.
1104  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1105  *DAG.getContext());
1106  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1107 
1108  // Get the size of the outgoing arguments stack space requirement.
1109  // The stack offset computed by CC_Sparc64 includes all arguments.
1110  // Called functions expect 6 argument words to exist in the stack frame, used
1111  // or not.
1112  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
1113 
1114  // Keep stack frames 16-byte aligned.
1115  ArgsSize = alignTo(ArgsSize, 16);
1116 
1117  // Varargs calls require special treatment.
1118  if (CLI.IsVarArg)
1119  fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1120 
1121  // Adjust the stack pointer to make room for the arguments.
1122  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1123  // with more than 6 arguments.
1124  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1125 
1126  // Collect the set of registers to pass to the function and their values.
1127  // This will be emitted as a sequence of CopyToReg nodes glued to the call
1128  // instruction.
1130 
  // Collect chains from all the memory operations that copy arguments to the
1132  // stack. They must follow the stack pointer adjustment above and precede the
1133  // call instruction itself.
1134  SmallVector<SDValue, 8> MemOpChains;
1135 
1136  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1137  const CCValAssign &VA = ArgLocs[i];
1138  SDValue Arg = CLI.OutVals[i];
1139 
1140  // Promote the value if needed.
1141  switch (VA.getLocInfo()) {
1142  default:
1143  llvm_unreachable("Unknown location info!");
1144  case CCValAssign::Full:
1145  break;
1146  case CCValAssign::SExt:
1147  Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1148  break;
1149  case CCValAssign::ZExt:
1150  Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1151  break;
1152  case CCValAssign::AExt:
1153  Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1154  break;
1155  case CCValAssign::BCvt:
1156  // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1157  // SPARC does not support i128 natively. Lower it into two i64, see below.
1158  if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1159  || VA.getLocVT() != MVT::i128)
1160  Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1161  break;
1162  }
1163 
1164  if (VA.isRegLoc()) {
1165  if (VA.needsCustom() && VA.getValVT() == MVT::f128
1166  && VA.getLocVT() == MVT::i128) {
1167  // Store and reload into the integer register reg and reg+1.
1168  unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1169  unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1170  SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1171  SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1172  HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1173  SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1174  LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1175 
1176  // Store to %sp+BIAS+128+Offset
1177  SDValue Store =
1178  DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1179  // Load into Reg and Reg+1
1180  SDValue Hi64 =
1181  DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1182  SDValue Lo64 =
1183  DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1184  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
1185  Hi64));
1186  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
1187  Lo64));
1188  continue;
1189  }
1190 
1191  // The custom bit on an i32 return value indicates that it should be
1192  // passed in the high bits of the register.
1193  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1194  Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1195  DAG.getConstant(32, DL, MVT::i32));
1196 
1197  // The next value may go in the low bits of the same register.
1198  // Handle both at once.
1199  if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1200  ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1202  CLI.OutVals[i+1]);
1203  Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1204  // Skip the next value, it's already done.
1205  ++i;
1206  }
1207  }
1208  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
1209  continue;
1210  }
1211 
1212  assert(VA.isMemLoc());
1213 
1214  // Create a store off the stack pointer for this argument.
1215  SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1216  // The argument area starts at %fp+BIAS+128 in the callee frame,
1217  // %sp+BIAS+128 in ours.
1218  SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1219  Subtarget->getStackPointerBias() +
1220  128, DL);
1221  PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1222  MemOpChains.push_back(
1223  DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1224  }
1225 
1226  // Emit all stores, make sure they occur before the call.
1227  if (!MemOpChains.empty())
1228  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1229 
1230  // Build a sequence of CopyToReg nodes glued together with token chain and
1231  // glue operands which copy the outgoing args into registers. The InGlue is
1232  // necessary since all emitted instructions must be stuck together in order
1233  // to pass the live physical registers.
1234  SDValue InGlue;
1235  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1236  Chain = DAG.getCopyToReg(Chain, DL,
1237  RegsToPass[i].first, RegsToPass[i].second, InGlue);
1238  InGlue = Chain.getValue(1);
1239  }
1240 
1241  // If the callee is a GlobalAddress node (quite common, every direct call is)
1242  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1243  // Likewise ExternalSymbol -> TargetExternalSymbol.
1244  SDValue Callee = CLI.Callee;
1245  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1248  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1249  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
1250  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1251  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
1252 
1253  // Build the operands for the call instruction itself.
1255  Ops.push_back(Chain);
1256  Ops.push_back(Callee);
1257  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1258  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1259  RegsToPass[i].second.getValueType()));
1260 
1261  // Add a register mask operand representing the call-preserved registers.
1262  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1263  const uint32_t *Mask =
1264  ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1266  CLI.CallConv));
1267  assert(Mask && "Missing call preserved mask for calling convention");
1268  Ops.push_back(DAG.getRegisterMask(Mask));
1269 
1270  // Make sure the CopyToReg nodes are glued to the call instruction which
1271  // consumes the registers.
1272  if (InGlue.getNode())
1273  Ops.push_back(InGlue);
1274 
1275  // Now the call itself.
1276  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1277  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1278  InGlue = Chain.getValue(1);
1279 
1280  // Revert the stack pointer immediately after the call.
1281  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
1282  DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
1283  InGlue = Chain.getValue(1);
1284 
1285  // Now extract the return values. This is more or less the same as
1286  // LowerFormalArguments_64.
1287 
1288  // Assign locations to each value returned by this call.
1290  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1291  *DAG.getContext());
1292 
1293  // Set inreg flag manually for codegen generated library calls that
1294  // return float.
1295  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1296  CLI.Ins[0].Flags.setInReg();
1297 
1298  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1299 
1300  // Copy all of the result registers out of their specified physreg.
1301  for (unsigned i = 0; i != RVLocs.size(); ++i) {
1302  CCValAssign &VA = RVLocs[i];
1303  unsigned Reg = toCallerWindow(VA.getLocReg());
1304 
1305  // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1306  // reside in the same register in the high and low bits. Reuse the
1307  // CopyFromReg previous node to avoid duplicate copies.
1308  SDValue RV;
1309  if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1310  if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1311  RV = Chain.getValue(0);
1312 
1313  // But usually we'll create a new CopyFromReg for a different register.
1314  if (!RV.getNode()) {
1315  RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1316  Chain = RV.getValue(1);
1317  InGlue = Chain.getValue(2);
1318  }
1319 
1320  // Get the high bits for i32 struct elements.
1321  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1322  RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1323  DAG.getConstant(32, DL, MVT::i32));
1324 
1325  // The callee promoted the return value, so insert an Assert?ext SDNode so
1326  // we won't promote the value again in this function.
1327  switch (VA.getLocInfo()) {
1328  case CCValAssign::SExt:
1329  RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1330  DAG.getValueType(VA.getValVT()));
1331  break;
1332  case CCValAssign::ZExt:
1333  RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1334  DAG.getValueType(VA.getValVT()));
1335  break;
1336  default:
1337  break;
1338  }
1339 
1340  // Truncate the register down to the return value type.
1341  if (VA.isExtInLoc())
1342  RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1343 
1344  InVals.push_back(RV);
1345  }
1346 
1347  return Chain;
1348 }
1349 
1350 //===----------------------------------------------------------------------===//
1351 // TargetLowering Implementation
1352 //===----------------------------------------------------------------------===//
1353 
1355  if (AI->getOperation() == AtomicRMWInst::Xchg &&
1356  AI->getType()->getPrimitiveSizeInBits() == 32)
1357  return AtomicExpansionKind::None; // Uses xchg instruction
1358 
1360 }
1361 
1362 /// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1363 /// condition.
1365  switch (CC) {
1366  default: llvm_unreachable("Unknown integer condition code!");
1367  case ISD::SETEQ: return SPCC::ICC_E;
1368  case ISD::SETNE: return SPCC::ICC_NE;
1369  case ISD::SETLT: return SPCC::ICC_L;
1370  case ISD::SETGT: return SPCC::ICC_G;
1371  case ISD::SETLE: return SPCC::ICC_LE;
1372  case ISD::SETGE: return SPCC::ICC_GE;
1373  case ISD::SETULT: return SPCC::ICC_CS;
1374  case ISD::SETULE: return SPCC::ICC_LEU;
1375  case ISD::SETUGT: return SPCC::ICC_GU;
1376  case ISD::SETUGE: return SPCC::ICC_CC;
1377  }
1378 }
1379 
/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
1381 /// FCC condition.
1383  switch (CC) {
1384  default: llvm_unreachable("Unknown fp condition code!");
1385  case ISD::SETEQ:
1386  case ISD::SETOEQ: return SPCC::FCC_E;
1387  case ISD::SETNE:
1388  case ISD::SETUNE: return SPCC::FCC_NE;
1389  case ISD::SETLT:
1390  case ISD::SETOLT: return SPCC::FCC_L;
1391  case ISD::SETGT:
1392  case ISD::SETOGT: return SPCC::FCC_G;
1393  case ISD::SETLE:
1394  case ISD::SETOLE: return SPCC::FCC_LE;
1395  case ISD::SETGE:
1396  case ISD::SETOGE: return SPCC::FCC_GE;
1397  case ISD::SETULT: return SPCC::FCC_UL;
1398  case ISD::SETULE: return SPCC::FCC_ULE;
1399  case ISD::SETUGT: return SPCC::FCC_UG;
1400  case ISD::SETUGE: return SPCC::FCC_UGE;
1401  case ISD::SETUO: return SPCC::FCC_U;
1402  case ISD::SETO: return SPCC::FCC_O;
1403  case ISD::SETONE: return SPCC::FCC_LG;
1404  case ISD::SETUEQ: return SPCC::FCC_UE;
1405  }
1406 }
1407 
1409  const SparcSubtarget &STI)
1410  : TargetLowering(TM), Subtarget(&STI) {
1411  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));
1412 
1413  // Instructions which use registers as conditionals examine all the
1414  // bits (as does the pseudo SELECT_CC expansion). I don't think it
1415  // matters much whether it's ZeroOrOneBooleanContent, or
1416  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1417  // former.
1420 
1421  // Set up the register classes.
1422  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1423  if (!Subtarget->useSoftFloat()) {
1424  addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1425  addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1426  addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1427  }
1428  if (Subtarget->is64Bit()) {
1429  addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1430  } else {
1431  // On 32bit sparc, we define a double-register 32bit register
1432  // class, as well. This is modeled in LLVM as a 2-vector of i32.
1433  addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1434 
1435  // ...but almost all operations must be expanded, so set that as
1436  // the default.
1437  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1439  }
1440  // Truncating/extending stores/loads are also not supported.
1445 
1449 
1452  }
1453  // However, load and store *are* legal.
1458 
1459  // And we need to promote i64 loads/stores into vector load/store
1462 
1463  // Sadly, this doesn't work:
1464  // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1465  // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1466  }
1467 
1468  // Turn FP extload into load/fpextend
1469  for (MVT VT : MVT::fp_valuetypes()) {
1473  }
1474 
1475  // Sparc doesn't have i1 sign extending load
1476  for (MVT VT : MVT::integer_valuetypes())
1478 
1479  // Turn FP truncstore into trunc + store.
1485 
1486  // Custom legalize GlobalAddress nodes into LO/HI parts.
1491 
1492  // Sparc doesn't have sext_inreg, replace them with shl/sra
1496 
1497  // Sparc has no REM or DIVREM operations.
1502 
1503  // ... nor does SparcV9.
1504  if (Subtarget->is64Bit()) {
1509  }
1510 
1511  // Custom expand fp<->sint
1516 
1517  // Custom Expand fp<->uint
1522 
1523  // Lower f16 conversion operations into library calls
1528 
1531 
1532  // Sparc has no select or setcc: expand to SELECT_CC.
1537 
1542 
1543  // Sparc doesn't have BRCOND either, it has BR_CC.
1551 
1556 
1561 
1562  if (Subtarget->is64Bit()) {
1573 
1575  Subtarget->usePopc() ? Legal : Expand);
1582  }
1583 
1584  // ATOMICs.
1585  // Atomics are supported on SparcV9. 32-bit atomics are also
1586  // supported by some Leon SparcV8 variants. Otherwise, atomics
1587  // are unsupported.
1588  if (Subtarget->isV9())
1590  else if (Subtarget->hasLeonCasa())
1592  else
1594 
1596 
1598 
1600 
1601  // Custom Lower Atomic LOAD/STORE
1604 
1605  if (Subtarget->is64Bit()) {
1610  }
1611 
1612  if (!Subtarget->is64Bit()) {
1613  // These libcalls are not available in 32-bit.
1614  setLibcallName(RTLIB::SHL_I128, nullptr);
1615  setLibcallName(RTLIB::SRL_I128, nullptr);
1616  setLibcallName(RTLIB::SRA_I128, nullptr);
1617  }
1618 
1619  if (!Subtarget->isV9()) {
1620  // SparcV8 does not have FNEGD and FABSD.
1623  }
1624 
1651 
1655 
1656  // Expands to [SU]MUL_LOHI.
1660 
1661  if (Subtarget->useSoftMulDiv()) {
1662  // .umul works for both signed and unsigned
1665  setLibcallName(RTLIB::MUL_I32, ".umul");
1666 
1668  setLibcallName(RTLIB::SDIV_I32, ".div");
1669 
1671  setLibcallName(RTLIB::UDIV_I32, ".udiv");
1672 
1673  setLibcallName(RTLIB::SREM_I32, ".rem");
1674  setLibcallName(RTLIB::UREM_I32, ".urem");
1675  }
1676 
1677  if (Subtarget->is64Bit()) {
1682 
1685 
1689  }
1690 
1691  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1693  // VAARG needs to be lowered to not do unaligned accesses for doubles.
1695 
1698 
1699  // Use the default implementation.
1705 
1707 
1709  Subtarget->usePopc() ? Legal : Expand);
1710 
1711  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1714  } else {
1717  }
1718 
1719  if (Subtarget->hasHardQuad()) {
1727  if (Subtarget->isV9()) {
1730  } else {
1733  }
1734 
1735  if (!Subtarget->is64Bit()) {
1736  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1737  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1738  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1739  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1740  }
1741 
1742  } else {
1743  // Custom legalize f128 operations.
1744 
1752 
1756 
1757  // Setup Runtime library names.
1758  if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1759  setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1760  setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1761  setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1762  setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1763  setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1764  setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1765  setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1766  setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1767  setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1768  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1769  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1770  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1771  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1772  setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1773  setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1774  setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1775  setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1776  } else if (!Subtarget->useSoftFloat()) {
1777  setLibcallName(RTLIB::ADD_F128, "_Q_add");
1778  setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1779  setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1780  setLibcallName(RTLIB::DIV_F128, "_Q_div");
1781  setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1782  setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1783  setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1784  setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1785  setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1786  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1787  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1788  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1789  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1790  setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1791  setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1792  setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1793  setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1794  }
1795  }
1796 
1797  if (Subtarget->fixAllFDIVSQRT()) {
1798  // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1799  // the former instructions generate errata on LEON processors.
1802  }
1803 
1804  if (Subtarget->hasNoFMULS()) {
1806  }
1807 
1808  // Custom combine bitcast between f64 and v2i32
1809  if (!Subtarget->is64Bit())
1811 
1812  if (Subtarget->hasLeonCycleCounter())
1814 
1816 
1818 
1820 }
1821 
1823  return Subtarget->useSoftFloat();
1824 }
1825 
1826 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1827  switch ((SPISD::NodeType)Opcode) {
1828  case SPISD::FIRST_NUMBER: break;
1829  case SPISD::CMPICC: return "SPISD::CMPICC";
1830  case SPISD::CMPFCC: return "SPISD::CMPFCC";
1831  case SPISD::BRICC: return "SPISD::BRICC";
1832  case SPISD::BRXCC: return "SPISD::BRXCC";
1833  case SPISD::BRFCC: return "SPISD::BRFCC";
1834  case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1835  case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1836  case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1837  case SPISD::Hi: return "SPISD::Hi";
1838  case SPISD::Lo: return "SPISD::Lo";
1839  case SPISD::FTOI: return "SPISD::FTOI";
1840  case SPISD::ITOF: return "SPISD::ITOF";
1841  case SPISD::FTOX: return "SPISD::FTOX";
1842  case SPISD::XTOF: return "SPISD::XTOF";
1843  case SPISD::CALL: return "SPISD::CALL";
1844  case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1845  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1846  case SPISD::FLUSHW: return "SPISD::FLUSHW";
1847  case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1848  case SPISD::TLS_LD: return "SPISD::TLS_LD";
1849  case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
1850  }
1851  return nullptr;
1852 }
1853 
1855  EVT VT) const {
1856  if (!VT.isVector())
1857  return MVT::i32;
1859 }
1860 
1861 /// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
1862 /// be zero. Op is expected to be a target specific node. Used by DAG
1863 /// combiner.
1865  (const SDValue Op,
1866  KnownBits &Known,
1867  const APInt &DemandedElts,
1868  const SelectionDAG &DAG,
1869  unsigned Depth) const {
1870  KnownBits Known2;
1871  Known.resetAll();
1872 
1873  switch (Op.getOpcode()) {
1874  default: break;
1875  case SPISD::SELECT_ICC:
1876  case SPISD::SELECT_XCC:
1877  case SPISD::SELECT_FCC:
1878  Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
1879  Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
1880 
1881  // Only known if known in both the LHS and RHS.
1882  Known = KnownBits::commonBits(Known, Known2);
1883  break;
1884  }
1885 }
1886 
1887 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
1888 // set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
1889 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
1890  ISD::CondCode CC, unsigned &SPCC) {
1891  if (isNullConstant(RHS) &&
1892  CC == ISD::SETNE &&
1893  (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1894  LHS.getOpcode() == SPISD::SELECT_XCC) &&
1895  LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1896  (LHS.getOpcode() == SPISD::SELECT_FCC &&
1897  LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
1898  isOneConstant(LHS.getOperand(0)) &&
1899  isNullConstant(LHS.getOperand(1))) {
1900  SDValue CMPCC = LHS.getOperand(3);
1901  SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
1902  LHS = CMPCC.getOperand(0);
1903  RHS = CMPCC.getOperand(1);
1904  }
1905 }
1906 
1907 // Convert to a target node and set target flags.
1909  SelectionDAG &DAG) const {
1910  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1911  return DAG.getTargetGlobalAddress(GA->getGlobal(),
1912  SDLoc(GA),
1913  GA->getValueType(0),
1914  GA->getOffset(), TF);
1915 
1916  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
1917  return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
1918  CP->getAlign(), CP->getOffset(), TF);
1919 
1920  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
1921  return DAG.getTargetBlockAddress(BA->getBlockAddress(),
1922  Op.getValueType(),
1923  0,
1924  TF);
1925 
1926  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
1927  return DAG.getTargetExternalSymbol(ES->getSymbol(),
1928  ES->getValueType(0), TF);
1929 
1930  llvm_unreachable("Unhandled address SDNode");
1931 }
1932 
1933 // Split Op into high and low parts according to HiTF and LoTF.
1934 // Return an ADD node combining the parts.
1936  unsigned HiTF, unsigned LoTF,
1937  SelectionDAG &DAG) const {
1938  SDLoc DL(Op);
1939  EVT VT = Op.getValueType();
1940  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
1941  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
1942  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
1943 }
1944 
1945 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
1946 // or ExternalSymbol SDNode.
1948  SDLoc DL(Op);
1949  EVT VT = getPointerTy(DAG.getDataLayout());
1950 
1951  // Handle PIC mode first. SPARC needs a got load for every variable!
1952  if (isPositionIndependent()) {
1953  const Module *M = DAG.getMachineFunction().getFunction().getParent();
1954  PICLevel::Level picLevel = M->getPICLevel();
1955  SDValue Idx;
1956 
1957  if (picLevel == PICLevel::SmallPIC) {
1958  // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
1959  Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
1961  } else {
1962  // This is the pic32 code model, the GOT is known to be smaller than 4GB.
1965  }
1966 
1967  SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
1968  SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
1969  // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
1970  // function has calls.
1972  MFI.setHasCalls(true);
1973  return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
1975  }
1976 
1977  // This is one of the absolute code models.
1978  switch(getTargetMachine().getCodeModel()) {
1979  default:
1980  llvm_unreachable("Unsupported absolute code model");
1981  case CodeModel::Small:
1982  // abs32.
1985  case CodeModel::Medium: {
1986  // abs44.
1989  H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
1991  L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
1992  return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
1993  }
1994  case CodeModel::Large: {
1995  // abs64.
1998  Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2001  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2002  }
2003  }
2004 }
2005 
2007  SelectionDAG &DAG) const {
2008  return makeAddress(Op, DAG);
2009 }
2010 
2012  SelectionDAG &DAG) const {
2013  return makeAddress(Op, DAG);
2014 }
2015 
2017  SelectionDAG &DAG) const {
2018  return makeAddress(Op, DAG);
2019 }
2020 
2022  SelectionDAG &DAG) const {
2023 
2024  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2025  if (DAG.getTarget().useEmulatedTLS())
2026  return LowerToTLSEmulatedModel(GA, DAG);
2027 
2028  SDLoc DL(GA);
2029  const GlobalValue *GV = GA->getGlobal();
2030  EVT PtrVT = getPointerTy(DAG.getDataLayout());
2031 
2033 
2035  unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2038  unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2041  unsigned addTF = ((model == TLSModel::GeneralDynamic)
2044  unsigned callTF = ((model == TLSModel::GeneralDynamic)
2047 
2048  SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2049  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2050  SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2051  withTargetFlags(Op, addTF, DAG));
2052 
2053  SDValue Chain = DAG.getEntryNode();
2054  SDValue InFlag;
2055 
2056  Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
2057  Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
2058  InFlag = Chain.getValue(1);
2059  SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2060  SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2061 
2062  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2063  const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2065  assert(Mask && "Missing call preserved mask for calling convention");
2066  SDValue Ops[] = {Chain,
2067  Callee,
2068  Symbol,
2069  DAG.getRegister(SP::O0, PtrVT),
2070  DAG.getRegisterMask(Mask),
2071  InFlag};
2072  Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2073  InFlag = Chain.getValue(1);
2074  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
2075  DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
2076  InFlag = Chain.getValue(1);
2077  SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
2078 
2080  return Ret;
2081 
2082  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2084  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2086  HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2087  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2089  }
2090 
2091  if (model == TLSModel::InitialExec) {
2092  unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2094 
2095  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2096 
2097  // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2098  // function has calls.
2100  MFI.setHasCalls(true);
2101 
2102  SDValue TGA = makeHiLoPair(Op,
2105  SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2107  DL, PtrVT, Ptr,
2108  withTargetFlags(Op, ldTF, DAG));
2109  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2110  DAG.getRegister(SP::G7, PtrVT), Offset,
2113  }
2114 
2116  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2118  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2120  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2121 
2122  return DAG.getNode(ISD::ADD, DL, PtrVT,
2123  DAG.getRegister(SP::G7, PtrVT), Offset);
2124 }
2125 
2128  const SDLoc &DL,
2129  SelectionDAG &DAG) const {
2131  EVT ArgVT = Arg.getValueType();
2132  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2133 
2134  ArgListEntry Entry;
2135  Entry.Node = Arg;
2136  Entry.Ty = ArgTy;
2137 
2138  if (ArgTy->isFP128Ty()) {
2139  // Create a stack object and pass the pointer to the library function.
2140  int FI = MFI.CreateStackObject(16, Align(8), false);
2141  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2142  Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2143  Align(8));
2144 
2145  Entry.Node = FIPtr;
2146  Entry.Ty = PointerType::getUnqual(ArgTy);
2147  }
2148  Args.push_back(Entry);
2149  return Chain;
2150 }
2151 
2152 SDValue
2154  const char *LibFuncName,
2155  unsigned numArgs) const {
2156 
2157  ArgListTy Args;
2158 
2160  auto PtrVT = getPointerTy(DAG.getDataLayout());
2161 
2162  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2163  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2164  Type *RetTyABI = RetTy;
2165  SDValue Chain = DAG.getEntryNode();
2166  SDValue RetPtr;
2167 
2168  if (RetTy->isFP128Ty()) {
2169  // Create a Stack Object to receive the return value of type f128.
2170  ArgListEntry Entry;
2171  int RetFI = MFI.CreateStackObject(16, Align(8), false);
2172  RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2173  Entry.Node = RetPtr;
2174  Entry.Ty = PointerType::getUnqual(RetTy);
2175  if (!Subtarget->is64Bit())
2176  Entry.IsSRet = true;
2177  Entry.IsReturned = false;
2178  Args.push_back(Entry);
2179  RetTyABI = Type::getVoidTy(*DAG.getContext());
2180  }
2181 
2182  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2183  for (unsigned i = 0, e = numArgs; i != e; ++i) {
2184  Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2185  }
2187  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2188  .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2189 
2190  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2191 
2192  // chain is in second result.
2193  if (RetTyABI == RetTy)
2194  return CallInfo.first;
2195 
2196  assert (RetTy->isFP128Ty() && "Unexpected return type!");
2197 
2198  Chain = CallInfo.second;
2199 
2200  // Load RetPtr to get the return value.
2201  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2202  MachinePointerInfo(), Align(8));
2203 }
2204 
2206  unsigned &SPCC, const SDLoc &DL,
2207  SelectionDAG &DAG) const {
2208 
2209  const char *LibCall = nullptr;
2210  bool is64Bit = Subtarget->is64Bit();
2211  switch(SPCC) {
2212  default: llvm_unreachable("Unhandled conditional code!");
2213  case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2214  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2215  case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2216  case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2217  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2218  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2219  case SPCC::FCC_UL :
2220  case SPCC::FCC_ULE:
2221  case SPCC::FCC_UG :
2222  case SPCC::FCC_UGE:
2223  case SPCC::FCC_U :
2224  case SPCC::FCC_O :
2225  case SPCC::FCC_LG :
2226  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2227  }
2228 
2229  auto PtrVT = getPointerTy(DAG.getDataLayout());
2230  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2231  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2232  ArgListTy Args;
2233  SDValue Chain = DAG.getEntryNode();
2234  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2235  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2236 
2238  CLI.setDebugLoc(DL).setChain(Chain)
2240 
2241  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2242 
2243  // result is in first, and chain is in second result.
2244  SDValue Result = CallInfo.first;
2245 
2246  switch(SPCC) {
2247  default: {
2248  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2249  SPCC = SPCC::ICC_NE;
2250  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2251  }
2252  case SPCC::FCC_UL : {
2253  SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2254  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2255  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2256  SPCC = SPCC::ICC_NE;
2257  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2258  }
2259  case SPCC::FCC_ULE: {
2260  SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2261  SPCC = SPCC::ICC_NE;
2262  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2263  }
2264  case SPCC::FCC_UG : {
2265  SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2266  SPCC = SPCC::ICC_G;
2267  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2268  }
2269  case SPCC::FCC_UGE: {
2270  SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2271  SPCC = SPCC::ICC_NE;
2272  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2273  }
2274 
2275  case SPCC::FCC_U : {
2276  SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2277  SPCC = SPCC::ICC_E;
2278  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2279  }
2280  case SPCC::FCC_O : {
2281  SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2282  SPCC = SPCC::ICC_NE;
2283  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2284  }
2285  case SPCC::FCC_LG : {
2286  SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2287  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2288  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2289  SPCC = SPCC::ICC_NE;
2290  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2291  }
2292  case SPCC::FCC_UE : {
2293  SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2294  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2295  SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2296  SPCC = SPCC::ICC_E;
2297  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2298  }
2299  }
2300 }
2301 
2302 static SDValue
2304  const SparcTargetLowering &TLI) {
2305 
2306  if (Op.getOperand(0).getValueType() == MVT::f64)
2307  return TLI.LowerF128Op(Op, DAG,
2308  TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2309 
2310  if (Op.getOperand(0).getValueType() == MVT::f32)
2311  return TLI.LowerF128Op(Op, DAG,
2312  TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2313 
2314  llvm_unreachable("fpextend with non-float operand!");
2315  return SDValue();
2316 }
2317 
2318 static SDValue
2320  const SparcTargetLowering &TLI) {
2321  // FP_ROUND on f64 and f32 are legal.
2322  if (Op.getOperand(0).getValueType() != MVT::f128)
2323  return Op;
2324 
2325  if (Op.getValueType() == MVT::f64)
2326  return TLI.LowerF128Op(Op, DAG,
2327  TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2328  if (Op.getValueType() == MVT::f32)
2329  return TLI.LowerF128Op(Op, DAG,
2330  TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2331 
2332  llvm_unreachable("fpround to non-float!");
2333  return SDValue();
2334 }
2335 
2337  const SparcTargetLowering &TLI,
2338  bool hasHardQuad) {
2339  SDLoc dl(Op);
2340  EVT VT = Op.getValueType();
2341  assert(VT == MVT::i32 || VT == MVT::i64);
2342 
2343  // Expand f128 operations to fp128 abi calls.
2344  if (Op.getOperand(0).getValueType() == MVT::f128
2345  && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2346  const char *libName = TLI.getLibcallName(VT == MVT::i32
2347  ? RTLIB::FPTOSINT_F128_I32
2348  : RTLIB::FPTOSINT_F128_I64);
2349  return TLI.LowerF128Op(Op, DAG, libName, 1);
2350  }
2351 
2352  // Expand if the resulting type is illegal.
2353  if (!TLI.isTypeLegal(VT))
2354  return SDValue();
2355 
2356  // Otherwise, Convert the fp value to integer in an FP register.
2357  if (VT == MVT::i32)
2358  Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2359  else
2360  Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2361 
2362  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2363 }
2364 
2366  const SparcTargetLowering &TLI,
2367  bool hasHardQuad) {
2368  SDLoc dl(Op);
2369  EVT OpVT = Op.getOperand(0).getValueType();
2370  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2371 
2372  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2373 
2374  // Expand f128 operations to fp128 ABI calls.
2375  if (Op.getValueType() == MVT::f128
2376  && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2377  const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2378  ? RTLIB::SINTTOFP_I32_F128
2379  : RTLIB::SINTTOFP_I64_F128);
2380  return TLI.LowerF128Op(Op, DAG, libName, 1);
2381  }
2382 
2383  // Expand if the operand type is illegal.
2384  if (!TLI.isTypeLegal(OpVT))
2385  return SDValue();
2386 
2387  // Otherwise, Convert the int value to FP in an FP register.
2388  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2389  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2390  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2391 }
2392 
2394  const SparcTargetLowering &TLI,
2395  bool hasHardQuad) {
2396  SDLoc dl(Op);
2397  EVT VT = Op.getValueType();
2398 
2399  // Expand if it does not involve f128 or the target has support for
2400  // quad floating point instructions and the resulting type is legal.
2401  if (Op.getOperand(0).getValueType() != MVT::f128 ||
2402  (hasHardQuad && TLI.isTypeLegal(VT)))
2403  return SDValue();
2404 
2405  assert(VT == MVT::i32 || VT == MVT::i64);
2406 
2407  return TLI.LowerF128Op(Op, DAG,
2408  TLI.getLibcallName(VT == MVT::i32
2409  ? RTLIB::FPTOUINT_F128_I32
2410  : RTLIB::FPTOUINT_F128_I64),
2411  1);
2412 }
2413 
2415  const SparcTargetLowering &TLI,
2416  bool hasHardQuad) {
2417  SDLoc dl(Op);
2418  EVT OpVT = Op.getOperand(0).getValueType();
2419  assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2420 
2421  // Expand if it does not involve f128 or the target has support for
2422  // quad floating point instructions and the operand type is legal.
2423  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2424  return SDValue();
2425 
2426  return TLI.LowerF128Op(Op, DAG,
2427  TLI.getLibcallName(OpVT == MVT::i32
2428  ? RTLIB::UINTTOFP_I32_F128
2429  : RTLIB::UINTTOFP_I64_F128),
2430  1);
2431 }
2432 
2434  const SparcTargetLowering &TLI,
2435  bool hasHardQuad) {
2436  SDValue Chain = Op.getOperand(0);
2437  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2438  SDValue LHS = Op.getOperand(2);
2439  SDValue RHS = Op.getOperand(3);
2440  SDValue Dest = Op.getOperand(4);
2441  SDLoc dl(Op);
2442  unsigned Opc, SPCC = ~0U;
2443 
2444  // If this is a br_cc of a "setcc", and if the setcc got lowered into
2445  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2446  LookThroughSetCC(LHS, RHS, CC, SPCC);
2447 
2448  // Get the condition flag.
2449  SDValue CompareFlag;
2450  if (LHS.getValueType().isInteger()) {
2451  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2452  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2453  // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2454  Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
2455  } else {
2456  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2457  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2458  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2459  Opc = SPISD::BRICC;
2460  } else {
2461  CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2462  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2463  Opc = SPISD::BRFCC;
2464  }
2465  }
2466  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2467  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2468 }
2469 
2471  const SparcTargetLowering &TLI,
2472  bool hasHardQuad) {
2473  SDValue LHS = Op.getOperand(0);
2474  SDValue RHS = Op.getOperand(1);
2475  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2476  SDValue TrueVal = Op.getOperand(2);
2477  SDValue FalseVal = Op.getOperand(3);
2478  SDLoc dl(Op);
2479  unsigned Opc, SPCC = ~0U;
2480 
2481  // If this is a select_cc of a "setcc", and if the setcc got lowered into
2482  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2483  LookThroughSetCC(LHS, RHS, CC, SPCC);
2484 
2485  SDValue CompareFlag;
2486  if (LHS.getValueType().isInteger()) {
2487  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2488  Opc = LHS.getValueType() == MVT::i32 ?
2490  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2491  } else {
2492  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2493  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2494  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2495  Opc = SPISD::SELECT_ICC;
2496  } else {
2497  CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2498  Opc = SPISD::SELECT_FCC;
2499  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2500  }
2501  }
2502  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2503  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2504 }
2505 
2507  const SparcTargetLowering &TLI) {
2508  MachineFunction &MF = DAG.getMachineFunction();
2510  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2511 
2512  // Need frame address to find the address of VarArgsFrameIndex.
2514 
2515  // vastart just stores the address of the VarArgsFrameIndex slot into the
2516  // memory location argument.
2517  SDLoc DL(Op);
2518  SDValue Offset =
2519  DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2520  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2521  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2522  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2523  MachinePointerInfo(SV));
2524 }
2525 
2527  SDNode *Node = Op.getNode();
2528  EVT VT = Node->getValueType(0);
2529  SDValue InChain = Node->getOperand(0);
2530  SDValue VAListPtr = Node->getOperand(1);
2531  EVT PtrVT = VAListPtr.getValueType();
2532  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2533  SDLoc DL(Node);
2534  SDValue VAList =
2535  DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2536  // Increment the pointer, VAList, to the next vaarg.
2537  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2538  DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2539  DL));
2540  // Store the incremented VAList to the legalized pointer.
2541  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2542  MachinePointerInfo(SV));
2543  // Load the actual argument out of the pointer VAList.
2544  // We can't count on greater alignment than the word size.
2545  return DAG.getLoad(
2546  VT, DL, InChain, VAList, MachinePointerInfo(),
2547  std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8);
2548 }
2549 
2551  const SparcSubtarget *Subtarget) {
2552  SDValue Chain = Op.getOperand(0); // Legalize the chain.
2553  SDValue Size = Op.getOperand(1); // Legalize the size.
2554  MaybeAlign Alignment =
2555  cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
2556  Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
2557  EVT VT = Size->getValueType(0);
2558  SDLoc dl(Op);
2559 
2560  // TODO: implement over-aligned alloca. (Note: also implies
2561  // supporting support for overaligned function frames + dynamic
2562  // allocations, at all, which currently isn't supported)
2563  if (Alignment && *Alignment > StackAlign) {
2564  const MachineFunction &MF = DAG.getMachineFunction();
2565  report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2566  "over-aligned dynamic alloca not supported.");
2567  }
2568 
2569  // The resultant pointer needs to be above the register spill area
2570  // at the bottom of the stack.
2571  unsigned regSpillArea;
2572  if (Subtarget->is64Bit()) {
2573  regSpillArea = 128;
2574  } else {
2575  // On Sparc32, the size of the spill area is 92. Unfortunately,
2576  // that's only 4-byte aligned, not 8-byte aligned (the stack
2577  // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2578  // aligned dynamic allocation, we actually need to add 96 to the
2579  // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2580 
2581  // That also means adding 4 to the size of the allocation --
2582  // before applying the 8-byte rounding. Unfortunately, we the
2583  // value we get here has already had rounding applied. So, we need
2584  // to add 8, instead, wasting a bit more memory.
2585 
2586  // Further, this only actually needs to be done if the required
2587  // alignment is > 4, but, we've lost that info by this point, too,
2588  // so we always apply it.
2589 
2590  // (An alternative approach would be to always reserve 96 bytes
2591  // instead of the required 92, but then we'd waste 4 extra bytes
2592  // in every frame, not just those with dynamic stack allocations)
2593 
2594  // TODO: modify code in SelectionDAGBuilder to make this less sad.
2595 
2596  Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2597  DAG.getConstant(8, dl, VT));
2598  regSpillArea = 96;
2599  }
2600 
2601  unsigned SPReg = SP::O6;
2602  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2603  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2604  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2605 
2606  regSpillArea += Subtarget->getStackPointerBias();
2607 
2608  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2609  DAG.getConstant(regSpillArea, dl, VT));
2610  SDValue Ops[2] = { NewVal, Chain };
2611  return DAG.getMergeValues(Ops, dl);
2612 }
2613 
2614 
2616  SDLoc dl(Op);
2617  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2618  dl, MVT::Other, DAG.getEntryNode());
2619  return Chain;
2620 }
2621 
2622 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2623  const SparcSubtarget *Subtarget,
2624  bool AlwaysFlush = false) {
2626  MFI.setFrameAddressIsTaken(true);
2627 
2628  EVT VT = Op.getValueType();
2629  SDLoc dl(Op);
2630  unsigned FrameReg = SP::I6;
2631  unsigned stackBias = Subtarget->getStackPointerBias();
2632 
2633  SDValue FrameAddr;
2634  SDValue Chain;
2635 
2636  // flush first to make sure the windowed registers' values are in stack
2637  Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2638 
2639  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2640 
2641  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2642 
2643  while (depth--) {
2644  SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2645  DAG.getIntPtrConstant(Offset, dl));
2646  FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2647  }
2648  if (Subtarget->is64Bit())
2649  FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2650  DAG.getIntPtrConstant(stackBias, dl));
2651  return FrameAddr;
2652 }
2653 
2654 
2656  const SparcSubtarget *Subtarget) {
2657 
2658  uint64_t depth = Op.getConstantOperandVal(0);
2659 
2660  return getFRAMEADDR(depth, Op, DAG, Subtarget);
2661 
2662 }
2663 
2665  const SparcTargetLowering &TLI,
2666  const SparcSubtarget *Subtarget) {
2667  MachineFunction &MF = DAG.getMachineFunction();
2668  MachineFrameInfo &MFI = MF.getFrameInfo();
2669  MFI.setReturnAddressIsTaken(true);
2670 
2672  return SDValue();
2673 
2674  EVT VT = Op.getValueType();
2675  SDLoc dl(Op);
2676  uint64_t depth = Op.getConstantOperandVal(0);
2677 
2678  SDValue RetAddr;
2679  if (depth == 0) {
2680  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2681  unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2682  RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2683  return RetAddr;
2684  }
2685 
2686  // Need frame address to find return address of the caller.
2687  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2688 
2689  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2690  SDValue Ptr = DAG.getNode(ISD::ADD,
2691  dl, VT,
2692  FrameAddr,
2693  DAG.getIntPtrConstant(Offset, dl));
2694  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2695 
2696  return RetAddr;
2697 }
2698 
2699 static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2700  unsigned opcode) {
2701  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2702  assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2703 
2704  // Lower fneg/fabs on f64 to fneg/fabs on f32.
2705  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2706  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2707 
2708  // Note: in little-endian, the floating-point value is stored in the
2709  // registers are in the opposite order, so the subreg with the sign
2710  // bit is the highest-numbered (odd), rather than the
2711  // lowest-numbered (even).
2712 
2713  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2714  SrcReg64);
2715  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2716  SrcReg64);
2717 
2718  if (DAG.getDataLayout().isLittleEndian())
2719  Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2720  else
2721  Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2722 
2723  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2724  dl, MVT::f64), 0);
2725  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2726  DstReg64, Hi32);
2727  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2728  DstReg64, Lo32);
2729  return DstReg64;
2730 }
2731 
2732 // Lower a f128 load into two f64 loads.
2734 {
2735  SDLoc dl(Op);
2736  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2737  assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2738 
2739  Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);
2740 
2741  SDValue Hi64 =
2742  DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2743  LdNode->getPointerInfo(), Alignment);
2744  EVT addrVT = LdNode->getBasePtr().getValueType();
2745  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2746  LdNode->getBasePtr(),
2747  DAG.getConstant(8, dl, addrVT));
2748  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2749  LdNode->getPointerInfo().getWithOffset(8),
2750  Alignment);
2751 
2752  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2753  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2754 
2755  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2756  dl, MVT::f128);
2757  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2758  MVT::f128,
2759  SDValue(InFP128, 0),
2760  Hi64,
2761  SubRegEven);
2762  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2763  MVT::f128,
2764  SDValue(InFP128, 0),
2765  Lo64,
2766  SubRegOdd);
2767  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2768  SDValue(Lo64.getNode(), 1) };
2769  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2770  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2771  return DAG.getMergeValues(Ops, dl);
2772 }
2773 
2775 {
2776  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2777 
2778  EVT MemVT = LdNode->getMemoryVT();
2779  if (MemVT == MVT::f128)
2780  return LowerF128Load(Op, DAG);
2781 
2782  return Op;
2783 }
2784 
2785 // Lower a f128 store into two f64 stores.
2787  SDLoc dl(Op);
2788  StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
2789  assert(StNode->getOffset().isUndef() && "Unexpected node type");
2790 
2791  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2792  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2793 
2794  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2795  dl,
2796  MVT::f64,
2797  StNode->getValue(),
2798  SubRegEven);
2799  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2800  dl,
2801  MVT::f64,
2802  StNode->getValue(),
2803  SubRegOdd);
2804 
2805  Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);
2806 
2807  SDValue OutChains[2];
2808  OutChains[0] =
2809  DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2810  StNode->getBasePtr(), StNode->getPointerInfo(),
2811  Alignment);
2812  EVT addrVT = StNode->getBasePtr().getValueType();
2813  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2814  StNode->getBasePtr(),
2815  DAG.getConstant(8, dl, addrVT));
2816  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
2817  StNode->getPointerInfo().getWithOffset(8),
2818  Alignment);
2819  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2820 }
2821 
2823 {
2824  SDLoc dl(Op);
2825  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
2826 
2827  EVT MemVT = St->getMemoryVT();
2828  if (MemVT == MVT::f128)
2829  return LowerF128Store(Op, DAG);
2830 
2831  if (MemVT == MVT::i64) {
2832  // Custom handling for i64 stores: turn it into a bitcast and a
2833  // v2i32 store.
2834  SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
2835  SDValue Chain = DAG.getStore(
2836  St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
2837  St->getOriginalAlign(), St->getMemOperand()->getFlags(),
2838  St->getAAInfo());
2839  return Chain;
2840  }
2841 
2842  return SDValue();
2843 }
2844 
2845 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2846  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
2847  && "invalid opcode");
2848 
2849  SDLoc dl(Op);
2850 
2851  if (Op.getValueType() == MVT::f64)
2852  return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
2853  if (Op.getValueType() != MVT::f128)
2854  return Op;
2855 
2856  // Lower fabs/fneg on f128 to fabs/fneg on f64
2857  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
2858  // (As with LowerF64Op, on little-endian, we need to negate the odd
2859  // subreg)
2860 
2861  SDValue SrcReg128 = Op.getOperand(0);
2862  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2863  SrcReg128);
2864  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
2865  SrcReg128);
2866 
2867  if (DAG.getDataLayout().isLittleEndian()) {
2868  if (isV9)
2869  Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
2870  else
2871  Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
2872  } else {
2873  if (isV9)
2874  Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
2875  else
2876  Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
2877  }
2878 
2879  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2880  dl, MVT::f128), 0);
2881  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
2882  DstReg128, Hi64);
2883  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
2884  DstReg128, Lo64);
2885  return DstReg128;
2886 }
2887 
2889 
2890  if (Op.getValueType() != MVT::i64)
2891  return Op;
2892 
2893  SDLoc dl(Op);
2894  SDValue Src1 = Op.getOperand(0);
2895  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
2896  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
2897  DAG.getConstant(32, dl, MVT::i64));
2898  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
2899 
2900  SDValue Src2 = Op.getOperand(1);
2901  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
2902  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
2903  DAG.getConstant(32, dl, MVT::i64));
2904  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
2905 
2906 
2907  bool hasChain = false;
2908  unsigned hiOpc = Op.getOpcode();
2909  switch (Op.getOpcode()) {
2910  default: llvm_unreachable("Invalid opcode");
2911  case ISD::ADDC: hiOpc = ISD::ADDE; break;
2912  case ISD::ADDE: hasChain = true; break;
2913  case ISD::SUBC: hiOpc = ISD::SUBE; break;
2914  case ISD::SUBE: hasChain = true; break;
2915  }
2916  SDValue Lo;
2917  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
2918  if (hasChain) {
2919  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
2920  Op.getOperand(2));
2921  } else {
2922  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
2923  }
2924  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
2925  SDValue Carry = Hi.getValue(1);
2926 
2927  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
2928  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
2929  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
2930  DAG.getConstant(32, dl, MVT::i64));
2931 
2932  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
2933  SDValue Ops[2] = { Dst, Carry };
2934  return DAG.getMergeValues(Ops, dl);
2935 }
2936 
2937 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
2938 // in LegalizeDAG.cpp except the order of arguments to the library function.
2940  const SparcTargetLowering &TLI)
2941 {
2942  unsigned opcode = Op.getOpcode();
2943  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
2944 
2945  bool isSigned = (opcode == ISD::SMULO);
2946  EVT VT = MVT::i64;
2947  EVT WideVT = MVT::i128;
2948  SDLoc dl(Op);
2949  SDValue LHS = Op.getOperand(0);
2950 
2951  if (LHS.getValueType() != VT)
2952  return Op;
2953 
2954  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
2955 
2956  SDValue RHS = Op.getOperand(1);
2957  SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
2958  SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
2959  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
2960 
2962  CallOptions.setSExt(isSigned);
2963  SDValue MulResult = TLI.makeLibCall(DAG,
2964  RTLIB::MUL_I128, WideVT,
2965  Args, CallOptions, dl).first;
2966  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2967  MulResult, DAG.getIntPtrConstant(0, dl));
2968  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2969  MulResult, DAG.getIntPtrConstant(1, dl));
2970  if (isSigned) {
2971  SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
2972  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
2973  } else {
2974  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
2975  ISD::SETNE);
2976  }
2977  // MulResult is a node with an illegal type. Because such things are not
2978  // generally permitted during this phase of legalization, ensure that
2979  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
2980  // been folded.
2981  assert(MulResult->use_empty() && "Illegally typed node still in use!");
2982 
2983  SDValue Ops[2] = { BottomHalf, TopHalf } ;
2984  return DAG.getMergeValues(Ops, dl);
2985 }
2986 
2988  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
2989  // Expand with a fence.
2990  return SDValue();
2991 
2992  // Monotonic load/stores are legal.
2993  return Op;
2994 }
2995 
2997  SelectionDAG &DAG) const {
2998  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2999  SDLoc dl(Op);
3000  switch (IntNo) {
3001  default: return SDValue(); // Don't custom lower most intrinsics.
3002  case Intrinsic::thread_pointer: {
3003  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3004  return DAG.getRegister(SP::G7, PtrVT);
3005  }
3006  }
3007 }
3008 
3011 
3012  bool hasHardQuad = Subtarget->hasHardQuad();
3013  bool isV9 = Subtarget->isV9();
3014 
3015  switch (Op.getOpcode()) {
3016  default: llvm_unreachable("Should not custom lower this!");
3017 
3018  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3019  Subtarget);
3020  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3021  Subtarget);
3022  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3023  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3024  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3025  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3026  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3027  hasHardQuad);
3028  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3029  hasHardQuad);
3030  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3031  hasHardQuad);
3032  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3033  hasHardQuad);
3034  case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
3035  hasHardQuad);
3036  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
3037  hasHardQuad);
3038  case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3039  case ISD::VAARG: return LowerVAARG(Op, DAG);
3041  Subtarget);
3042 
3043  case ISD::LOAD: return LowerLOAD(Op, DAG);
3044  case ISD::STORE: return LowerSTORE(Op, DAG);
3045  case ISD::FADD: return LowerF128Op(Op, DAG,
3046  getLibcallName(RTLIB::ADD_F128), 2);
3047  case ISD::FSUB: return LowerF128Op(Op, DAG,
3048  getLibcallName(RTLIB::SUB_F128), 2);
3049  case ISD::FMUL: return LowerF128Op(Op, DAG,
3050  getLibcallName(RTLIB::MUL_F128), 2);
3051  case ISD::FDIV: return LowerF128Op(Op, DAG,
3052  getLibcallName(RTLIB::DIV_F128), 2);
3053  case ISD::FSQRT: return LowerF128Op(Op, DAG,
3054  getLibcallName(RTLIB::SQRT_F128),1);
3055  case ISD::FABS:
3056  case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3057  case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3058  case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3059  case ISD::ADDC:
3060  case ISD::ADDE:
3061  case ISD::SUBC:
3062  case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
3063  case ISD::UMULO:
3064  case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3065  case ISD::ATOMIC_LOAD:
3066  case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3068  }
3069 }
3070 
3072  const SDLoc &DL,
3073  SelectionDAG &DAG) const {
3074  APInt V = C->getValueAPF().bitcastToAPInt();
3075  SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3076  SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3077  if (DAG.getDataLayout().isLittleEndian())
3078  std::swap(Lo, Hi);
3079  return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3080 }
3081 
3083  DAGCombinerInfo &DCI) const {
3084  SDLoc dl(N);
3085  SDValue Src = N->getOperand(0);
3086 
3087  if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3088  Src.getSimpleValueType() == MVT::f64)
3089  return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3090 
3091  return SDValue();
3092 }
3093 
3095  DAGCombinerInfo &DCI) const {
3096  switch (N->getOpcode()) {
3097  default:
3098  break;
3099  case ISD::BITCAST:
3100  return PerformBITCASTCombine(N, DCI);
3101  }
3102  return SDValue();
3103 }
3104 
3107  MachineBasicBlock *BB) const {
3108  switch (MI.getOpcode()) {
3109  default: llvm_unreachable("Unknown SELECT_CC!");
3110  case SP::SELECT_CC_Int_ICC:
3111  case SP::SELECT_CC_FP_ICC:
3112  case SP::SELECT_CC_DFP_ICC:
3113  case SP::SELECT_CC_QFP_ICC:
3114  return expandSelectCC(MI, BB, SP::BCOND);
3115  case SP::SELECT_CC_Int_FCC:
3116  case SP::SELECT_CC_FP_FCC:
3117  case SP::SELECT_CC_DFP_FCC:
3118  case SP::SELECT_CC_QFP_FCC:
3119  return expandSelectCC(MI, BB, SP::FBCOND);
3120  }
3121 }
3122 
3125  unsigned BROpcode) const {
3126  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3127  DebugLoc dl = MI.getDebugLoc();
3128  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3129 
3130  // To "insert" a SELECT_CC instruction, we actually have to insert the
3131  // triangle control-flow pattern. The incoming instruction knows the
3132  // destination vreg to set, the condition code register to branch on, the
3133  // true/false values to select between, and the condition code for the branch.
3134  //
3135  // We produce the following control flow:
3136  // ThisMBB
3137  // | \
3138  // | IfFalseMBB
3139  // | /
3140  // SinkMBB
3141  const BasicBlock *LLVM_BB = BB->getBasicBlock();
3142  MachineFunction::iterator It = ++BB->getIterator();
3143 
3144  MachineBasicBlock *ThisMBB = BB;
3145  MachineFunction *F = BB->getParent();
3146  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3147  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3148  F->insert(It, IfFalseMBB);
3149  F->insert(It, SinkMBB);
3150 
3151  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3152  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3153  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3154  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3155 
3156  // Set the new successors for ThisMBB.
3157  ThisMBB->addSuccessor(IfFalseMBB);
3158  ThisMBB->addSuccessor(SinkMBB);
3159 
3160  BuildMI(ThisMBB, dl, TII.get(BROpcode))
3161  .addMBB(SinkMBB)
3162  .addImm(CC);
3163 
3164  // IfFalseMBB just falls through to SinkMBB.
3165  IfFalseMBB->addSuccessor(SinkMBB);
3166 
3167  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3168  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3169  MI.getOperand(0).getReg())
3170  .addReg(MI.getOperand(1).getReg())
3171  .addMBB(ThisMBB)
3172  .addReg(MI.getOperand(2).getReg())
3173  .addMBB(IfFalseMBB);
3174 
3175  MI.eraseFromParent(); // The pseudo instruction is gone now.
3176  return SinkMBB;
3177 }
3178 
3179 //===----------------------------------------------------------------------===//
3180 // Sparc Inline Assembly Support
3181 //===----------------------------------------------------------------------===//
3182 
3183 /// getConstraintType - Given a constraint letter, return the type of
3184 /// constraint it is for this target.
3187  if (Constraint.size() == 1) {
3188  switch (Constraint[0]) {
3189  default: break;
3190  case 'r':
3191  case 'f':
3192  case 'e':
3193  return C_RegisterClass;
3194  case 'I': // SIMM13
3195  return C_Immediate;
3196  }
3197  }
3198 
3199  return TargetLowering::getConstraintType(Constraint);
3200 }
3201 
3204  const char *constraint) const {
3205  ConstraintWeight weight = CW_Invalid;
3206  Value *CallOperandVal = info.CallOperandVal;
3207  // If we don't have a value, we can't do a match,
3208  // but allow it at the lowest weight.
3209  if (!CallOperandVal)
3210  return CW_Default;
3211 
3212  // Look at the constraint type.
3213  switch (*constraint) {
3214  default:
3216  break;
3217  case 'I': // SIMM13
3218  if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3219  if (isInt<13>(C->getSExtValue()))
3220  weight = CW_Constant;
3221  }
3222  break;
3223  }
3224  return weight;
3225 }
3226 
3227 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3228 /// vector. If it is invalid, don't add anything to Ops.
3231  std::string &Constraint,
3232  std::vector<SDValue> &Ops,
3233  SelectionDAG &DAG) const {
3234  SDValue Result(nullptr, 0);
3235 
3236  // Only support length 1 constraints for now.
3237  if (Constraint.length() > 1)
3238  return;
3239 
3240  char ConstraintLetter = Constraint[0];
3241  switch (ConstraintLetter) {
3242  default: break;
3243  case 'I':
3244  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3245  if (isInt<13>(C->getSExtValue())) {
3246  Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3247  Op.getValueType());
3248  break;
3249  }
3250  return;
3251  }
3252  }
3253 
3254  if (Result.getNode()) {
3255  Ops.push_back(Result);
3256  return;
3257  }
3258  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3259 }
3260 
3261 std::pair<unsigned, const TargetRegisterClass *>
3263  StringRef Constraint,
3264  MVT VT) const {
3265  if (Constraint.size() == 1) {
3266  switch (Constraint[0]) {
3267  case 'r':
3268  if (VT == MVT::v2i32)
3269  return std::make_pair(0U, &SP::IntPairRegClass);
3270  else if (Subtarget->is64Bit())
3271  return std::make_pair(0U, &SP::I64RegsRegClass);
3272  else
3273  return std::make_pair(0U, &SP::IntRegsRegClass);
3274  case 'f':
3275  if (VT == MVT::f32 || VT == MVT::i32)
3276  return std::make_pair(0U, &SP::FPRegsRegClass);
3277  else if (VT == MVT::f64 || VT == MVT::i64)
3278  return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3279  else if (VT == MVT::f128)
3280  return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3281  // This will generate an error message
3282  return std::make_pair(0U, nullptr);
3283  case 'e':
3284  if (VT == MVT::f32 || VT == MVT::i32)
3285  return std::make_pair(0U, &SP::FPRegsRegClass);
3286  else if (VT == MVT::f64 || VT == MVT::i64 )
3287  return std::make_pair(0U, &SP::DFPRegsRegClass);
3288  else if (VT == MVT::f128)
3289  return std::make_pair(0U, &SP::QFPRegsRegClass);
3290  // This will generate an error message
3291  return std::make_pair(0U, nullptr);
3292  }
3293  } else if (!Constraint.empty() && Constraint.size() <= 5
3294  && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
3295  // constraint = '{r<d>}'
3296  // Remove the braces from around the name.
3297  StringRef name(Constraint.data()+1, Constraint.size()-2);
3298  // Handle register aliases:
3299  // r0-r7 -> g0-g7
3300  // r8-r15 -> o0-o7
3301  // r16-r23 -> l0-l7
3302  // r24-r31 -> i0-i7
3303  uint64_t intVal = 0;
3304  if (name.substr(0, 1).equals("r")
3305  && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
3306  const char regTypes[] = { 'g', 'o', 'l', 'i' };
3307  char regType = regTypes[intVal/8];
3308  char regIdx = '0' + (intVal % 8);
3309  char tmp[] = { '{', regType, regIdx, '}', 0 };
3310  std::string newConstraint = std::string(tmp);
3311  return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
3312  VT);
3313  }
3314  if (name.substr(0, 1).equals("f") &&
3315  !name.substr(1).getAsInteger(10, intVal) && intVal <= 63) {
3316  std::string newConstraint;
3317 
3318  if (VT == MVT::f32 || VT == MVT::Other) {
3319  newConstraint = "{f" + utostr(intVal) + "}";
3320  } else if (VT == MVT::f64 && (intVal % 2 == 0)) {
3321  newConstraint = "{d" + utostr(intVal / 2) + "}";
3322  } else if (VT == MVT::f128 && (intVal % 4 == 0)) {
3323  newConstraint = "{q" + utostr(intVal / 4) + "}";
3324  } else {
3325  return std::make_pair(0U, nullptr);
3326  }
3327  return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
3328  VT);
3329  }
3330  }
3331 
3332  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3333 }
3334 
3335 bool
3337  // The Sparc target isn't yet aware of offsets.
3338  return false;
3339 }
3340 
3343  SelectionDAG &DAG) const {
3344 
3345  SDLoc dl(N);
3346 
3347  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3348 
3349  switch (N->getOpcode()) {
3350  default:
3351  llvm_unreachable("Do not know how to custom type legalize this operation!");
3352 
3353  case ISD::FP_TO_SINT:
3354  case ISD::FP_TO_UINT:
3355  // Custom lower only if it involves f128 or i64.
3356  if (N->getOperand(0).getValueType() != MVT::f128
3357  || N->getValueType(0) != MVT::i64)
3358  return;
3359  libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3360  ? RTLIB::FPTOSINT_F128_I64
3361  : RTLIB::FPTOUINT_F128_I64);
3362 
3363  Results.push_back(LowerF128Op(SDValue(N, 0),
3364  DAG,
3365  getLibcallName(libCall),
3366  1));
3367  return;
3368  case ISD::READCYCLECOUNTER: {
3369  assert(Subtarget->hasLeonCycleCounter());
3370  SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3371  SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3372  SDValue Ops[] = { Lo, Hi };
3373  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3374  Results.push_back(Pair);
3375  Results.push_back(N->getOperand(0));
3376  return;
3377  }
3378  case ISD::SINT_TO_FP:
3379  case ISD::UINT_TO_FP:
3380  // Custom lower only if it involves f128 or i64.
3381  if (N->getValueType(0) != MVT::f128
3382  || N->getOperand(0).getValueType() != MVT::i64)
3383  return;
3384 
3385  libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3386  ? RTLIB::SINTTOFP_I64_F128
3387  : RTLIB::UINTTOFP_I64_F128);
3388 
3389  Results.push_back(LowerF128Op(SDValue(N, 0),
3390  DAG,
3391  getLibcallName(libCall),
3392  1));
3393  return;
3394  case ISD::LOAD: {
3395  LoadSDNode *Ld = cast<LoadSDNode>(N);
3396  // Custom handling only for i64: turn i64 load into a v2i32 load,
3397  // and a bitcast.
3398  if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3399  return;
3400 
3401  SDLoc dl(N);
3402  SDValue LoadRes = DAG.getExtLoad(
3403  Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3404  Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
3405  Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
3406  Ld->getAAInfo());
3407 
3408  SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3409  Results.push_back(Res);
3410  Results.push_back(LoadRes.getValue(1));
3411  return;
3412  }
3413  }
3414 }
3415 
3416 // Override to enable LOAD_STACK_GUARD lowering on Linux.
3418  if (!Subtarget->isTargetLinux())
3420  return true;
3421 }
3422 
3423 // Override to disable global variable loading on Linux.
3425  if (!Subtarget->isTargetLinux())
3427 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:233
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
llvm::StringSwitch::Case
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:67
llvm::CCValAssign::getLocVT
MVT getLocVT() const
Definition: CallingConvLower.h:153
llvm::MachineRegisterInfo::addLiveIn
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Definition: MachineRegisterInfo.h:929
llvm::SparcMCExpr::VK_Sparc_TLS_IE_LO10
@ VK_Sparc_TLS_IE_LO10
Definition: SparcMCExpr.h:56
i
i
Definition: README.txt:29
llvm::ISD::SETUGE
@ SETUGE
Definition: ISDOpcodes.h:1349
llvm::SparcRegisterInfo
Definition: SparcRegisterInfo.h:22
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:158
L5
to esp esp setne al movzbw ax esp setg cl movzbw cx cmove cx cl jne LBB1_2 esp which is much esp edx eax decl edx jle L7 L5
Definition: README.txt:656
llvm::LoadSDNode::getOffset
const SDValue & getOffset() const
Definition: SelectionDAGNodes.h:2275
llvm::SPISD::GLOBAL_BASE_REG
@ GLOBAL_BASE_REG
Definition: SparcISelLowering.h:44
llvm::CCValAssign::ZExt
@ ZExt
Definition: CallingConvLower.h:38
llvm::SPISD::TLS_ADD
@ TLS_ADD
Definition: SparcISelLowering.h:47
toCallerWindow
static unsigned toCallerWindow(unsigned Reg)
Definition: SparcISelLowering.cpp:186
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
llvm::ConstantSDNode
Definition: SelectionDAGNodes.h:1536
llvm::RegisterSDNode
Definition: SelectionDAGNodes.h:2072
LowerATOMIC_LOAD_STORE
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
Definition: SparcISelLowering.cpp:2987
llvm::StoreSDNode::getBasePtr
const SDValue & getBasePtr() const
Definition: SelectionDAGNodes.h:2305
llvm::RISCVAttrs::StackAlign
StackAlign
Definition: RISCVAttributes.h:37
LowerVASTART
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
Definition: SparcISelLowering.cpp:2506
SparcRegisterInfo.h
llvm::SPCC::ICC_CS
@ ICC_CS
Definition: Sparc.h:52
llvm::SparcMCExpr::VK_Sparc_TLS_IE_ADD
@ VK_Sparc_TLS_IE_ADD
Definition: SparcMCExpr.h:59
llvm::SelectionDAG::getCALLSEQ_START
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
Definition: SelectionDAG.h:919
llvm::ISD::SETLE
@ SETLE
Definition: ISDOpcodes.h:1360
llvm::SPISD::ITOF
@ ITOF
Definition: SparcISelLowering.h:38
llvm::ISD::SETO
@ SETO
Definition: ISDOpcodes.h:1345
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:100
llvm::ISD::UMULO
@ UMULO
Definition: ISDOpcodes.h:312
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:132
llvm
Definition: AllocatorList.h:23
llvm::ISD::ArgFlagsTy::isSRet
bool isSRet() const
Definition: TargetCallingConv.h:80
llvm::SparcMCExpr::VK_Sparc_L44
@ VK_Sparc_L44
Definition: SparcMCExpr.h:31
Reg
unsigned Reg
Definition: MachineSink.cpp:1566
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::StringRef::empty
LLVM_NODISCARD bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:156
llvm::SDNode::getValueType
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Definition: SelectionDAGNodes.h:958
llvm::SystemZISD::TM
@ TM
Definition: SystemZISelLowering.h:65
llvm::PICLevel::SmallPIC
@ SmallPIC
Definition: CodeGen.h:33
llvm::SDLoc
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Definition: SelectionDAGNodes.h:1078
llvm::SPCC::FCC_G
@ FCC_G
Definition: Sparc.h:61
llvm::CCValAssign::Full
@ Full
Definition: CallingConvLower.h:36
llvm::TargetLoweringBase::Legal
@ Legal
Definition: TargetLowering.h:193
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
llvm::ISD::OR
@ OR
Definition: ISDOpcodes.h:618
llvm::SparcTargetLowering::LowerConstantPool
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:2011
llvm::TargetMachine::useEmulatedTLS
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
Definition: TargetMachine.cpp:162
llvm::ISD::SETGT
@ SETGT
Definition: ISDOpcodes.h:1357
llvm::ISD::BITCAST
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:833
llvm::ISD::SETNE
@ SETNE
Definition: ISDOpcodes.h:1361
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:158
llvm::TargetLowering::getSingleConstraintMatchWeight
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Definition: TargetLowering.cpp:4838
llvm::MachineFrameInfo::setReturnAddressIsTaken
void setReturnAddressIsTaken(bool s)
Definition: MachineFrameInfo.h:373
llvm::TargetLowering::ConstraintType
ConstraintType
Definition: TargetLowering.h:4101
llvm::ISD::BR_JT
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:931
llvm::KnownBits::resetAll
void resetAll()
Resets the known state of all bits.
Definition: KnownBits.h:66
llvm::SparcTargetLowering::isOffsetFoldingLegal
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
Definition: SparcISelLowering.cpp:3336
llvm::TargetLowering::CallLoweringInfo::setChain
CallLoweringInfo & setChain(SDValue InChain)
Definition: TargetLowering.h:3736
llvm::SparcMCExpr::VK_Sparc_HM
@ VK_Sparc_HM
Definition: SparcMCExpr.h:33
llvm::CCState
CCState - This class holds information needed while lowering arguments and return values.
Definition: CallingConvLower.h:191
llvm::ISD::AssertSext
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:59
llvm::SelectionDAG::getCopyToReg
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:735
llvm::SPCC::FCC_LE
@ FCC_LE
Definition: Sparc.h:71
llvm::EVT::getFixedSizeInBits
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition: ValueTypes.h:341
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:52
llvm::SDValue::getNode
SDNode * getNode() const
get the SDNode which holds the desired result
Definition: SelectionDAGNodes.h:152
llvm::isOneConstant
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Definition: SelectionDAG.cpp:9362
llvm::PointerType::getElementType
Type * getElementType() const
Definition: DerivedTypes.h:653
llvm::Function
Definition: Function.h:61
llvm::SPCC::ICC_LE
@ ICC_LE
Definition: Sparc.h:46
llvm::ISD::BSWAP
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:651
llvm::StringSwitch::Default
LLVM_NODISCARD R Default(T Value)
Definition: StringSwitch.h:181
llvm::ISD::UDIV
@ UDIV
Definition: ISDOpcodes.h:236
is64Bit
static bool is64Bit(const char *name)
Definition: X86Disassembler.cpp:1005
llvm::MVT::i128
@ i128
Definition: MachineValueType.h:45
llvm::ISD::DYNAMIC_STACKALLOC
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:916
llvm::SelectionDAG::getValueType
SDValue getValueType(EVT)
Definition: SelectionDAG.cpp:1696
llvm::SelectionDAG::getMemcpy
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
Definition: SelectionDAG.cpp:6624
llvm::CCState::addLoc
void addLoc(const CCValAssign &V)
Definition: CallingConvLower.h:253
IntCondCCodeToICC
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC)
IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC condition.
Definition: SparcISelLowering.cpp:1364
CC_Sparc64_Full
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Definition: SparcISelLowering.cpp:104
llvm::SparcSubtarget::isV9
bool isV9() const
Definition: SparcSubtarget.h:81
llvm::ISD::ADDC
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:262
llvm::SPCC::FCC_UE
@ FCC_UE
Definition: Sparc.h:68
llvm::TLSModel::GeneralDynamic
@ GeneralDynamic
Definition: CodeGen.h:43
llvm::CodeModel::Medium
@ Medium
Definition: CodeGen.h:28
llvm::AtomicRMWInst::getOperation
BinOp getOperation() const
Definition: Instructions.h:781
llvm::TargetLoweringBase::setMinCmpXchgSizeInBits
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
Definition: TargetLowering.h:2289
llvm::SelectionDAG::getFrameIndex
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
Definition: SelectionDAG.cpp:1577
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::ISD::SETEQ
@ SETEQ
Definition: ISDOpcodes.h:1356
llvm::SPCC::ICC_L
@ ICC_L
Definition: Sparc.h:48
llvm::ISD::STACKRESTORE
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:997
llvm::SelectionDAG::getVTList
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
Definition: SelectionDAG.cpp:7967
llvm::SparcMCExpr::VK_Sparc_TLS_LE_LOX10
@ VK_Sparc_TLS_LE_LOX10
Definition: SparcMCExpr.h:61
llvm::SparcMachineFunctionInfo::setSRetReturnReg
void setSRetReturnReg(Register Reg)
Definition: SparcMachineFunctionInfo.h:48
llvm::MipsISD::Lo
@ Lo
Definition: MipsISelLowering.h:79
ErrorHandling.h
llvm::SelectionDAG::getRoot
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
Definition: SelectionDAG.h:513
llvm::SparcTargetLowering::getRegisterByName
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
Definition: SparcISelLowering.cpp:1018
llvm::SparcMCExpr::VK_Sparc_TLS_LDO_ADD
@ VK_Sparc_TLS_LDO_ADD
Definition: SparcMCExpr.h:54
llvm::APInt::zextOrTrunc
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:948
llvm::SparcTargetLowering::useLoadStackGuardNode
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
Definition: SparcISelLowering.cpp:3417
llvm::MemSDNode::getMemoryVT
EVT getMemoryVT() const
Return the type of the in-memory value.
Definition: SelectionDAGNodes.h:1325
llvm::MemSDNode::getChain
const SDValue & getChain() const
Definition: SelectionDAGNodes.h:1348
llvm::ISD::ANY_EXTEND
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:717
llvm::SDNode
Represents one node in the SelectionDAG.
Definition: SelectionDAGNodes.h:455
llvm::TargetLoweringBase::getLibcallName
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
Definition: TargetLowering.h:2811
llvm::TargetLowering::CW_Constant
@ CW_Constant
Definition: TargetLowering.h:4122
llvm::ISD::FMA
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:460
llvm::ISD::FP_TO_SINT
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:770
llvm::TargetLowering::DAGCombinerInfo::DAG
SelectionDAG & DAG
Definition: TargetLowering.h:3495
llvm::SparcTargetLowering::LowerINTRINSIC_WO_CHAIN
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:2996
llvm::LoadSDNode
This class is used to represent ISD::LOAD nodes.
Definition: SelectionDAGNodes.h:2255
llvm::TargetLowering::CallLoweringInfo::setCallee
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
Definition: TargetLowering.h:3755
llvm::MVT::Glue
@ Glue
Definition: MachineValueType.h:247
llvm::SparcSubtarget::getRegisterInfo
const SparcRegisterInfo * getRegisterInfo() const override
Definition: SparcSubtarget.h:68
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:231
llvm::Depth
@ Depth
Definition: SIMachineScheduler.h:34
llvm::TargetLowering::isPositionIndependent
bool isPositionIndependent() const
Definition: TargetLowering.cpp:45
llvm::SPCC::FCC_UG
@ FCC_UG
Definition: Sparc.h:62
llvm::ISD::SETULE
@ SETULE
Definition: ISDOpcodes.h:1351
llvm::CCState::AnalyzeFormalArguments
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
Definition: CallingConvLower.cpp:90
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:46
llvm::RTLIB::Libcall
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Definition: RuntimeLibcalls.h:30
llvm::SparcTargetLowering::LowerF128_LibCallArg
SDValue LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, SDValue Arg, const SDLoc &DL, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:2126
Module.h
llvm::SPCC::FCC_O
@ FCC_O
Definition: Sparc.h:73
llvm::ISD::SHL_PARTS
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:700
llvm::SelectionDAG::getStore
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
Definition: SelectionDAG.cpp:7296
llvm::SPCC::ICC_LEU
@ ICC_LEU
Definition: Sparc.h:50
llvm::ISD::SETCC
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:687
llvm::SparcTargetLowering::LowerReturn_32
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:206
llvm::SPISD::BRXCC
@ BRXCC
Definition: SparcISelLowering.h:29
llvm::SparcTargetLowering
Definition: SparcISelLowering.h:53
llvm::MVT::integer_valuetypes
static mvt_range integer_valuetypes()
Definition: MachineValueType.h:1362
llvm::SparcSubtarget::getFrameLowering
const TargetFrameLowering * getFrameLowering() const override
Definition: SparcSubtarget.h:65
llvm::Function::hasFnAttribute
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:345
llvm::TargetLowering::CallLoweringInfo::CB
const CallBase * CB
Definition: TargetLowering.h:3719
llvm::CCValAssign::BCvt
@ BCvt
Definition: CallingConvLower.h:46
llvm::TargetLoweringBase::setMinFunctionAlignment
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
Definition: TargetLowering.h:2260
llvm::TargetLowering::LowerCallTo
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
Definition: SelectionDAGBuilder.cpp:9281
LowerF128_FPEXTEND
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
Definition: SparcISelLowering.cpp:2303
llvm::SparcMCExpr::VK_Sparc_TLS_LDM_ADD
@ VK_Sparc_TLS_LDM_ADD
Definition: SparcMCExpr.h:50
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::ore::NV
DiagnosticInfoOptimizationBase::Argument NV
Definition: OptimizationRemarkEmitter.h:128
llvm::tgtok::FalseVal
@ FalseVal
Definition: TGLexer.h:61
Results
Function Alias Analysis Results
Definition: AliasAnalysis.cpp:849
llvm::TargetLoweringBase::getVectorIdxTy
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
Definition: TargetLowering.h:388
fixupVariableFloatArgs
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
Definition: SparcISelLowering.cpp:1045
llvm::MipsISD::Ret
@ Ret
Definition: MipsISelLowering.h:116
llvm::ISD::VAEND
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1026
llvm::ISD::EXTLOAD
@ EXTLOAD
Definition: ISDOpcodes.h:1316
llvm::APInt::lshr
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:988
llvm::SPCC::CondCodes
CondCodes
Definition: Sparc.h:40
tmp
alloca< 16 x float >, align 16 %tmp2=alloca< 16 x float >, align 16 store< 16 x float > %A,< 16 x float > *%tmp %s=bitcast< 16 x float > *%tmp to i8 *%s2=bitcast< 16 x float > *%tmp2 to i8 *call void @llvm.memcpy.i64(i8 *%s, i8 *%s2, i64 64, i32 16) %R=load< 16 x float > *%tmp2 ret< 16 x float > %R } declare void @llvm.memcpy.i64(i8 *nocapture, i8 *nocapture, i64, i32) nounwind which compiles to:_foo:subl $140, %esp movaps %xmm3, 112(%esp) movaps %xmm2, 96(%esp) movaps %xmm1, 80(%esp) movaps %xmm0, 64(%esp) movl 60(%esp), %eax movl %eax, 124(%esp) movl 56(%esp), %eax movl %eax, 120(%esp) movl 52(%esp), %eax< many many more 32-bit copies > movaps(%esp), %xmm0 movaps 16(%esp), %xmm1 movaps 32(%esp), %xmm2 movaps 48(%esp), %xmm3 addl $140, %esp ret On Nehalem, it may even be cheaper to just use movups when unaligned than to fall back to lower-granularity chunks. Implement processor-specific optimizations for parity with GCC on these processors. GCC does two optimizations:1. ix86_pad_returns inserts a noop before ret instructions if immediately preceded by a conditional branch or is the target of a jump. 2. ix86_avoid_jump_misspredicts inserts noops in cases where a 16-byte block of code contains more than 3 branches. The first one is done for all AMDs, Core2, and "Generic" The second one is done for:Atom, Pentium Pro, all AMDs, Pentium 4, Nocona, Core 2, and "Generic" Testcase:int x(int a) { return(a &0xf0)> >4 tmp
Definition: README.txt:1347
llvm::ISD::SETOEQ
@ SETOEQ
Definition: ISDOpcodes.h:1339
llvm::BlockAddressSDNode
Definition: SelectionDAGNodes.h:2106
llvm::SparcMCExpr::VK_Sparc_TLS_LDM_HI22
@ VK_Sparc_TLS_LDM_HI22
Definition: SparcMCExpr.h:48
SelectionDAG.h
llvm::BitmaskEnumDetail::Mask
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1567
SparcISelLowering.h
llvm::SparcTargetLowering::EmitInstrWithCustomInserter
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
Definition: SparcISelLowering.cpp:3106
llvm::SPISD::FIRST_NUMBER
@ FIRST_NUMBER
Definition: SparcISelLowering.h:25
llvm::Type::getInt32Ty
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:204
llvm::ISD::SETUEQ
@ SETUEQ
Definition: ISDOpcodes.h:1347
llvm::CCState::AnalyzeCallOperands
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
Definition: CallingConvLower.cpp:131
llvm::SelectionDAG::getContext
LLVMContext * getContext() const
Definition: SelectionDAG.h:447
llvm::ISD::FABS
@ FABS
Definition: ISDOpcodes.h:852
llvm::SparcMCExpr::VK_Sparc_M44
@ VK_Sparc_M44
Definition: SparcMCExpr.h:30
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::ISD::BRCOND
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:941
MachineRegisterInfo.h
KnownBits.h
getFRAMEADDR
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget, bool AlwaysFlush=false)
Definition: SparcISelLowering.cpp:2622
llvm::SPISD::BRFCC
@ BRFCC
Definition: SparcISelLowering.h:30
llvm::SelectionDAG::getRegister
SDValue getRegister(unsigned Reg, EVT VT)
Definition: SelectionDAG.cpp:1952
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
llvm::MipsISD::Hi
@ Hi
Definition: MipsISelLowering.h:75
llvm::SparcMCExpr::VK_Sparc_TLS_GD_CALL
@ VK_Sparc_TLS_GD_CALL
Definition: SparcMCExpr.h:47
SparcTargetObjectFile.h
LookThroughSetCC
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
Definition: SparcISelLowering.cpp:1889
CC_Sparc_Assign_SRet
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Definition: SparcISelLowering.cpp:41
llvm::ISD::BRIND
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:927
llvm::ISD::ROTL
@ ROTL
Definition: ISDOpcodes.h:645
llvm::SPCC::FCC_ULE
@ FCC_ULE
Definition: Sparc.h:72
llvm::SPISD::FLUSHW
@ FLUSHW
Definition: SparcISelLowering.h:45
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:205
llvm::MachineBasicBlock::addSuccessor
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
Definition: MachineBasicBlock.cpp:743
llvm::CallInfo
Definition: GVNHoist.cpp:217
CC_Sparc_Assign_Split_64
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Definition: SparcISelLowering.cpp:54
llvm::SPCC::FCC_LG
@ FCC_LG
Definition: Sparc.h:65
llvm::ISD::BR_CC
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:948
LowerF128_FPROUND
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
Definition: SparcISelLowering.cpp:2319
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:77
llvm::SelectionDAG::getLoad
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
Definition: SelectionDAG.cpp:7246
llvm::SparcMCExpr::VK_Sparc_TLS_LE_HIX22
@ VK_Sparc_TLS_LE_HIX22
Definition: SparcMCExpr.h:60
llvm::MVT::i1
@ i1
Definition: MachineValueType.h:40
llvm::SDNode::getOpcode
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
Definition: SelectionDAGNodes.h:621
llvm::TargetLowering::CallLoweringInfo::IsVarArg
bool IsVarArg
Definition: TargetLowering.h:3697
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:565
LowerFNEGorFABS
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
Definition: SparcISelLowering.cpp:2845
llvm::CCValAssign::AExt
@ AExt
Definition: CallingConvLower.h:39
llvm::ISD::GlobalAddress
@ GlobalAddress
Definition: ISDOpcodes.h:71
llvm::ISD::SELECT_CC
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:679
llvm::ExternalSymbolSDNode
Definition: SelectionDAGNodes.h:2148
llvm::TargetInstrInfo
TargetInstrInfo - Interface to description of machine instruction set.
Definition: TargetInstrInfo.h:97
LowerUMULO_SMULO
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
Definition: SparcISelLowering.cpp:2939
llvm::CCValAssign::isExtInLoc
bool isExtInLoc() const
Definition: CallingConvLower.h:156
llvm::SDValue::getValueType
EVT getValueType() const
Return the ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:1113
llvm::SparcMCExpr::VK_Sparc_TLS_LDM_CALL
@ VK_Sparc_TLS_LDM_CALL
Definition: SparcMCExpr.h:51
llvm::CCValAssign
CCValAssign - Represent assignment of one arg/retval to a location.
Definition: CallingConvLower.h:33
llvm::ISD::CTLZ
@ CTLZ
Definition: ISDOpcodes.h:653
llvm::SPCC::ICC_CC
@ ICC_CC
Definition: Sparc.h:51
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:147
L2
add sub stmia L5 ldr L2
Definition: README.txt:201
llvm::SelectionDAG
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:216
llvm::ISD::SELECT
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:664
llvm::SPISD::Hi
@ Hi
Definition: SparcISelLowering.h:35
llvm::ISD::ZERO_EXTEND
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:714
llvm::SparcTargetLowering::LowerReturn_64
SDValue LowerReturn_64(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:291
llvm::ISD::ArgFlagsTy::isByVal
bool isByVal() const
Definition: TargetCallingConv.h:83
llvm::SparcTargetLowering::LowerFormalArguments_64
SDValue LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
Definition: SparcISelLowering.cpp:576
llvm::SelectionDAG::getTargetBlockAddress
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:730
llvm::ISD::SETGE
@ SETGE
Definition: ISDOpcodes.h:1358
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::CCValAssign::getLocReg
Register getLocReg() const
Definition: CallingConvLower.h:150
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:653
L3
AMD64 Optimization Manual has some nice information about optimizing integer multiplication by a constant How much of it applies to Intel s X86 implementation There are definite trade offs to xmm0 cvttss2siq rdx jb L3 subss xmm0 rax cvttss2siq rdx xorq rdx L3
Definition: README-X86-64.txt:22
llvm::ISD::SIGN_EXTEND_INREG
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:732
llvm::TargetFrameLowering::getStackAlign
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Definition: TargetFrameLowering.h:99
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:35
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::SparcMCExpr::VK_Sparc_H44
@ VK_Sparc_H44
Definition: SparcMCExpr.h:29
LowerRETURNADDR
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
Definition: SparcISelLowering.cpp:2664
llvm::MVT::f64
@ f64
Definition: MachineValueType.h:53
llvm::SelectionDAG::getConstant
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
Definition: SelectionDAG.cpp:1346
llvm::TargetLowering
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Definition: TargetLowering.h:3141
llvm::SparcTargetLowering::makeAddress
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:1947
llvm::TargetLowering::DAGCombinerInfo
Definition: TargetLowering.h:3489
llvm::CCState::AnalyzeReturn
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
Definition: CallingConvLower.cpp:118
SparcMCExpr.h
llvm::ISD::TRUNCATE
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:720
llvm::SparcTargetLowering::SparcTargetLowering
SparcTargetLowering(const TargetMachine &TM, const SparcSubtarget &STI)
Definition: SparcISelLowering.cpp:1408
llvm::ISD::SRA
@ SRA
Definition: ISDOpcodes.h:643
llvm::SPCC::FCC_L
@ FCC_L
Definition: Sparc.h:63
llvm::SparcTargetLowering::bitcastConstantFPToInt
SDValue bitcastConstantFPToInt(ConstantFPSDNode *C, const SDLoc &DL, SelectionDAG &DAG) const
Definition: SparcISelLowering.cpp:3071
llvm::TargetLowering::CallLoweringInfo::setDebugLoc
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
Definition: TargetLowering.h:3731
llvm::ISD::UDIVREM
@ UDIVREM
Definition: ISDOpcodes.h:249
llvm::MachinePointerInfo::getGOT
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
Definition: MachineOperand.cpp:1004
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:129
llvm::TargetLoweringBase::addRegisterClass
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
Definition: TargetLowering.h:2140
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:119
llvm::CodeModel::Small
@ Small
Definition: CodeGen.h:28
llvm::SPISD::FTOX
@ FTOX
Definition: SparcISelLowering.h:39
llvm::StoreSDNode::getOffset
const SDValue & getOffset() const
Definition: SelectionDAGNodes.h:2306
llvm::SparcTargetLowering::LowerCall_32
SDValue LowerCall_32(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
Definition: SparcISelLowering.cpp:715
llvm::EVT::isInteger
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:139
llvm::MCID::Flag
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:147
llvm::SDNode::use_empty
bool use_empty() const
Return true if there are no uses of this node.
Definition: SelectionDAGNodes.h:689
llvm::ISD::SINT_TO_FP
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:724
llvm::report_fatal_error
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:140
llvm::CCValAssign::getCustomMem
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, unsigned Offset, MVT LocVT, LocInfo HTP)
Definition: CallingConvLower.h:116
llvm::ISD::FP16_TO_FP
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:843
llvm::ISD::ATOMIC_STORE
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
Definition: ISDOpcodes.h:1099
CC_Sparc64_Half
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Definition: SparcISelLowering.cpp:149
llvm::TargetLowering::C_Immediate
@ C_Immediate
Definition: TargetLowering.h:4105
llvm::SparcRegisterInfo::getCallPreservedMask
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID CC) const override
Definition: SparcRegisterInfo.cpp:44
llvm::ISD::AND
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:617
llvm::CCValAssign::getLocInfo
LocInfo getLocInfo() const
Definition: CallingConvLower.h:155
llvm::SparcSubtarget::hasLeonCasa
bool hasLeonCasa() const
Definition: SparcSubtarget.h:95