//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert(ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, 0, LocVT, LocInfo));
  return true;
}

static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get the first register.
  if (Register Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign the whole thing on the stack.
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
    return true;
  }

  // Try to get the second register.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
  return true;
}

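// For illustration, a hypothetical 32-bit call site:
//
//   long long f(int a, int b, int c, int d, int e, long long x);
//
// a..e take %i0-%i4 (callee view), so the split i64 `x` gets %i5 for its
// high word while the low word goes to the stack -- which is why the two
// halves above are allocated independently.
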
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}

// Allocate a full-sized argument for the 64-bit ABI.
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
  Align alignment =
      (LocVT == MVT::f128 || ArgFlags.isSplit()) ? Align(16) : Align(8);
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

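// Worked example (for illustration, hypothetical prototype): for
//
//   void f(long a, double b, float c)
//
// the 8-byte argument slots get offsets 0, 8 and 16, so the code above picks
//   a -> %i0           (I0 + 0/8)
//   b -> D1, asm %d2   (D0 + 8/8)
//   c -> %f5           (F1 + 16/4)
// i.e. each argument's register is derived purely from its slot offset.
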
// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, Align(4));

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

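// For illustration: passing { float, int } by value uses two 4-byte slots.
// The float at offset 0 is promoted to %f0; the i32 at offset 4 shares %i0
// with it, landing in the register's low half (no Custom flag). An i32 at an
// 8-byte-aligned offset would instead be flagged Custom and shifted into the
// high half of its register.
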
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

#include "SparcGenCallingConv.inc"

// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
                "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}

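// For illustration: after the callee's `save`, the caller's %o registers
// become the callee's %i registers. The calling conventions here are written
// from the callee's point of view, so a caller translates:
//
//   toCallerWindow(SP::I0) == SP::O0
//   toCallerWindow(SP::G1) == SP::G1   // non-window registers pass through
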
bool SparcTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
    const Type *RetTy) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
                                                       : RetCC_Sparc32);
}

SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}

SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (basically: do what would
      // happen by default if this wasn't a legal type).

      SDValue Part0 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Arg,
                      DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Arg,
                      DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // Skip ahead to the next location.
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1, Glue);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to %i0.
  if (MF.getFunction().hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // Call Inst + Delay Slot + Unimp
  }

  RetOps[0] = Chain; // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}
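
// For illustration, the V8 struct-return protocol this implements
// (hypothetical callee `f` returning an 8-byte struct):
//
//   call  f          ! %o7 = address of the call
//   nop              ! delay slot
//   unimp 8          ! struct size word, checked by the callee
//   ...              ! f returns to %o7+12, skipping the unimp
//
// hence RetAddrOffset is 12 instead of the usual 8 for sret functions.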

// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}

SDValue SparcTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}

/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::LowerFormalArguments_32(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  const unsigned StackOffset = 92;
  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];
    EVT LocVT = VA.getLocVT();

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      InVals.push_back(Arg);
      continue;
    }

    SDValue Arg;
    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

        Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo().
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(), true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
        } else {
          Register loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
            DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocInfo() != CCValAssign::Indirect) {
        if (VA.getLocVT() == MVT::f32)
          Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
        else if (VA.getLocVT() != MVT::i32) {
          Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                            DAG.getValueType(VA.getLocVT()));
          Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
        }
        InVals.push_back(Arg);
        continue;
      }
    } else {
      assert(VA.isMemLoc());

      unsigned Offset = VA.getLocMemOffset() + StackOffset;

      if (VA.needsCustom()) {
        assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
        // If it is double-word aligned, just load.
        if (Offset % 8 == 0) {
          int FI = MF.getFrameInfo().CreateFixedObject(8, Offset, true);
          SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
          SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
                                     MachinePointerInfo());
          InVals.push_back(Load);
          continue;
        }

        int FI = MF.getFrameInfo().CreateFixedObject(4, Offset, true);
        SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
        SDValue HiVal =
            DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
        int FI2 = MF.getFrameInfo().CreateFixedObject(4, Offset + 4, true);
        SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);

        SDValue LoVal =
            DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
            DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }

      int FI = MF.getFrameInfo().CreateFixedObject(LocVT.getSizeInBits() / 8,
                                                   Offset, true);
      SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
      SDValue Load = DAG.getLoad(LocVT, dl, Chain, FIPtr,
                                 MachinePointerInfo());
      if (VA.getLocInfo() != CCValAssign::Indirect) {
        InVals.push_back(Load);
        continue;
      }
      Arg = Load;
    }

    assert(VA.getLocInfo() == CCValAssign::Indirect);

    SDValue ArgValue =
        DAG.getLoad(VA.getValVT(), dl, Chain, Arg, MachinePointerInfo());
    InVals.push_back(ArgValue);

    unsigned ArgIndex = Ins[InIdx].OrigArgIndex;
    assert(Ins[InIdx].PartOffset == 0);
    while (i + 1 != e && Ins[InIdx + 1].OrigArgIndex == ArgIndex) {
      CCValAssign &PartVA = ArgLocs[i + 1];
      unsigned PartOffset = Ins[InIdx + 1].PartOffset;
      SDValue Address = DAG.getMemBasePlusOffset(
          ArgValue, TypeSize::getFixed(PartOffset), dl);
      InVals.push_back(DAG.getLoad(PartVA.getValVT(), dl, Chain, Address,
                                   MachinePointerInfo()));
      ++i;
      ++InIdx;
    }
  }

  if (MF.getFunction().hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getStackSize();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
                                                         true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(
          DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}
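
// For illustration: the 32-bit ABI reserves six words at [%sp+68..%sp+91]
// for %o0-%o5. A vararg function with one fixed argument (NumAllocated == 1)
// spills %i1-%i5 to [%fp+72..%fp+91], so va_arg can walk a contiguous array:
//
//   int sum(int n, ...);   // n stays in %i0; varargs start at %fp+72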

// Lower formal arguments for the 64-bit ABI.
SDValue SparcTargetLowering::LowerFormalArguments_64(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (const CCValAssign &VA : ArgLocs) {
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      Register VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, DL, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust offset for extended arguments, SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
                    DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
                    MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getStackSize();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    OutChains.push_back(
        DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
                     MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}
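
// For illustration (hypothetical prototype): with the V9 stack bias of 2047,
//
//   void log_msg(const char *fmt, int level, ...);
//
// consumes two 8-byte slots for its fixed arguments, so va_start yields
// %fp + 2047 + 128 + 16 -- the save slot of %i2, the first variadic word.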

// Check whether any of the argument registers are reserved.
static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI,
                                const MachineFunction &MF) {
  // The register window design means that outgoing parameters at O*
  // will appear in the callee as I*.
  // Be conservative and check both sides of the register names.
  bool Outgoing =
      llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
        return TRI->isReservedReg(MF, r);
      });
  bool Incoming =
      llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
        return TRI->isReservedReg(MF, r);
      });
  return Outgoing || Incoming;
}

static void emitReservedArgRegCallError(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, ("SPARC doesn't support"
          " function calls if any of the argument registers is reserved.")});
}

SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}

static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                const CallBase *Call) {
  if (Call)
    return Call->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function &Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn.getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}
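
// For illustration: `setjmp` is the classic returns_twice callee:
//
//   if (setjmp(buf)) { ... }
//
// Such a call site may be resumed after the callee has "returned" once
// already, so the lowering below selects TRI->getRTCallPreservedMask()
// instead of the normal call-preserved mask for it.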

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
bool SparcTargetLowering::IsEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {

  auto &Outs = CLI.Outs;
  auto &Caller = MF.getFunction();

  // Do not tail call opt functions with "disable-tail-calls" attribute.
  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Do not tail call opt if the stack is used to pass parameters.
  // 64-bit targets have a slightly higher limit since the ABI requires
  // allocating some space even when all the parameters fit in registers.
  unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
  if (CCInfo.getStackSize() > StackSizeLimit)
    return false;

  // Do not tail call opt if either the callee or caller returns
  // a struct and the other does not.
  if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
    return false;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call.
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
      return false;

  return true;
}
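
// For illustration (hypothetical callees): on 32-bit targets StackSizeLimit
// is 0, so any stack-passed argument disqualifies the call:
//
//   int f(int a, int b) { return g(a, b); }   // may become a tail call
//   int h(int a, int b, int c, int d, int e, int f2, int g2)
//   { return k(a, b, c, d, e, f2, g2); }      // 7th word on stack: no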

// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  LLVMContext &Ctx = *DAG.getContext();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  isTailCall = isTailCall && IsEligibleForTailCallOptimization(
                                 CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getStackSize();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();

    if (Size > 0U) {
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);

      Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
                            false,        // isVolatile
                            (Size <= 32), // AlwaysInline if size <= 32
                            /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
                            MachinePointerInfo());
      ByValArgs.push_back(FIPtr);
    } else {
      SDValue nullVal;
      ByValArgs.push_back(nullVal);
    }
  }

  assert(!isTailCall || ArgsSize == 0);

  if (!isTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);

  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  unsigned SRetArgSize = 0;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use the local copy if it is a byval arg.
    if (Flags.isByVal()) {
      Arg = ByValArgs[byvalArgIdx++];
      if (!Arg) {
        continue;
      }
    }

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
    case CCValAssign::Indirect:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());

      if (isTailCall)
        continue;

      // Store the SRet argument at %sp+64.
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      hasStructRetAttr = true;
      // sret is only allowed on the first argument.
      assert(Outs[realArgIdx].OrigArgIndex == 0);
      SRetArgSize =
          DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // If it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
          continue;
        }
      }

      if (VA.getLocVT() == MVT::f64) {
        // Move the float value from float registers into the
        // integer registers.
        if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
          Arg = bitcastConstantFPToInt(C, dl, DAG);
        else
          Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
      }

      SDValue Part0 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Arg,
                      DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 =
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Arg,
                      DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
        } else {
          // Store the second part on the stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the first part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
        // Store the second part.
        PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
      }
      continue;
    }

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      unsigned ArgIndex = Outs[realArgIdx].OrigArgIndex;
      assert(Outs[realArgIdx].PartOffset == 0);

      EVT SlotVT;
      if (i + 1 != e && Outs[realArgIdx + 1].OrigArgIndex == ArgIndex) {
        Type *OrigArgType = CLI.Args[ArgIndex].Ty;
        EVT OrigArgVT = getValueType(MF.getDataLayout(), OrigArgType);
        MVT PartVT =
            getRegisterTypeForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
        unsigned N =
            getNumRegistersForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
        SlotVT = EVT::getIntegerVT(Ctx, PartVT.getSizeInBits() * N);
      } else {
        SlotVT = Outs[realArgIdx].VT;
      }

      SDValue SpillSlot = DAG.CreateStackTemporary(SlotVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. f128), we need
      // to store all parts of it here (and pass just one address).
      while (i + 1 != e && Outs[realArgIdx + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[realArgIdx + 1];
        unsigned PartOffset = Outs[realArgIdx + 1].PartOffset;
        SDValue Address = DAG.getMemBasePlusOffset(
            DAG.getFrameIndex(FI, PtrVT), TypeSize::getFixed(PartOffset), dl);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI, PartOffset)));
        assert((PartOffset + PartValue.getValueType().getStoreSize() <=
                SlotVT.getStoreSize()) &&
               "Not enough space for argument part!");
        ++i;
        ++realArgIdx;
      }

      Arg = SpillSlot;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
                                           dl);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, making sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InGlue is necessary since all emitted instructions must be
  // stuck together.
  SDValue InGlue;
  for (const auto &[OrigReg, N] : RegsToPass) {
    Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, N, InGlue);
    InGlue = Chain.getValue(1);
  }

  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
  for (const auto &[OrigReg, N] : RegsToPass) {
    Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
    Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice)
           ? TRI->getRTCallPreservedMask(CallConv)
           : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));

  if (isAnyArgRegReserved(TRI, MF))
    emitReservedArgRegCallError(MF);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  if (isTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
  }

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
  InGlue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physregs.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
    if (RVLocs[i].getLocVT() == MVT::v2i32) {
      SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
      SDValue Lo = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
                        DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
                        DAG.getConstant(1, dl, MVT::i32));
      InVals.push_back(Vec);
    } else {
      Chain =
          DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                             RVLocs[i].getValVT(), InGlue)
              .getValue(1);
      InGlue = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    }
  }

  return Chain;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                                const MachineFunction &MF) const {
  Register Reg = StringSwitch<Register>(RegName)
    .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
    .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
    .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
    .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
    .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
    .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
    .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
    .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
    .Default(0);

  // If we're directly referencing register names
  // (e.g. in the GCC C extension `register int r asm("g1");`),
  // make sure that said register is in the reserved list.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (!TRI->isReservedReg(MF, Reg))
    Reg = Register();

  return Reg;
}
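
// For illustration: this hook backs global register variables, e.g. the GCC
// extension (assuming %g1 has been reserved, e.g. via a -ffixed-<reg> style
// option):
//
//   register int counter asm("g1");
//
// Reads and writes of `counter` then map to SP::G1 via the table above; if
// the register is not reserved, the lookup deliberately returns no register.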

// Fix up floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (CCValAssign &VA : ArgLocs) {
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (!Outs[VA.getValNo()].Flags.isVarArg())
      continue;

    // This floating point argument should be reassigned.
    // Determine the offset into the argument array.
    Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
                                 CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- we will lower this into
        // two i64s in LowerCall_64.
        VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
                                       MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
                               VA.getLocVT(), VA.getLocInfo());
    }
  }
}
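
// For illustration, a variadic double:
//
//   printf("%f\n", x);   // x is a double in the ... part
//
// CC_Sparc64 first gives x the second FP slot (offset 8, D1); the fixup above
// reassigns it to %i1 as a bit-converted i64, which the caller then writes as
// %o1 -- exactly where a variadic callee expects to reload it.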

// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  CLI.IsTailCall = CLI.IsTailCall && IsEligibleForTailCallOptimization(
                                         CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned StackReserved = 6 * 8u;
  unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  assert(!CLI.IsTailCall || ArgsSize == StackReserved);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  if (!CLI.IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());

        Register HiReg = VA.getLocReg();
        Register LoReg = VA.getLocReg() + 1;
        if (!CLI.IsTailCall) {
          HiReg = toCallerWindow(HiReg);
          LoReg = toCallerWindow(LoReg);
        }

        RegsToPass.push_back(std::make_pair(HiReg, Hi64));
        RegsToPass.push_back(std::make_pair(LoReg, Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }

      Register Reg = VA.getLocReg();
      if (!CLI.IsTailCall)
        Reg = toCallerWindow(Reg);
      RegsToPass.push_back(std::make_pair(Reg, Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, making sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (const auto &[Reg, N] : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg, N, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (const auto &[Reg, N] : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg, N.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                         : TRI->getCallPreservedMask(DAG.getMachineFunction(),
                                                     CLI.CallConv));

  if (isAnyArgRegReserved(TRI, MF))
    emitReservedArgRegCallError(MF);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  if (CLI.IsTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
  }
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set the inreg flag manually for codegen-generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physregs.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32}', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // previous CopyFromReg node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
/// rcond condition.
static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown/unsigned integer condition code!");
  case ISD::SETEQ:
    return SPCC::REG_Z;
  case ISD::SETNE:
    return SPCC::REG_NZ;
  case ISD::SETLT:
    return SPCC::REG_LZ;
  case ISD::SETGT:
    return SPCC::REG_GZ;
  case ISD::SETLE:
    return SPCC::REG_LEZ;
  case ISD::SETGE:
    return SPCC::REG_GEZ;
  }
}

/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}

/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}
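
// For illustration: these codes select the fbcc branch mnemonics, e.g.
// SETOLT -> FCC_L ("fbl", ordered less) while SETULT -> FCC_UL ("fbul",
// unordered-or-less), which is how ordered and unordered IR comparisons
// stay distinct through branch lowering.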
1632
1634 const SparcSubtarget &STI)
1635 : TargetLowering(TM, STI), Subtarget(&STI) {
1636 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
1637
1638 // Instructions which use registers as conditionals examine all the
1639 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1640 // matters much whether it's ZeroOrOneBooleanContent, or
1641 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1642 // former.
1645
1646 // Set up the register classes.
1647 addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1648 if (!Subtarget->useSoftFloat()) {
1649 addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1650 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1651 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1652 }
1653 if (Subtarget->is64Bit()) {
1654 addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1655 } else {
1656    // On 32-bit SPARC, we also define a register class for pairs of
1657    // 32-bit registers. This is modeled in LLVM as a 2-vector of i32.
1658 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1659
1660 // ...but almost all operations must be expanded, so set that as
1661 // the default.
1662 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1663 setOperationAction(Op, MVT::v2i32, Expand);
1664 }
1665    // Truncating/extending stores/loads are also not supported.
1666    for (MVT VT : MVT::integer_vector_valuetypes()) {
1667    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
1668 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
1669 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
1670
1671 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
1672 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
1673 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
1674
1675 setTruncStoreAction(VT, MVT::v2i32, Expand);
1676 setTruncStoreAction(MVT::v2i32, VT, Expand);
1677 }
1678 // However, load and store *are* legal.
1679 setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
1680 setOperationAction(ISD::STORE, MVT::v2i32, Legal);
1683
1684 // And we need to promote i64 loads/stores into vector load/store
1685 setOperationAction(ISD::LOAD, MVT::i64, Custom);
1686 setOperationAction(ISD::STORE, MVT::i64, Custom);
1687
1688 // Sadly, this doesn't work:
1689 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1690 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1691 }
1692
1693 // Turn FP extload into load/fpextend
1694 for (MVT VT : MVT::fp_valuetypes()) {
1695 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1696 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1697 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1698 }
1699
1700 // Sparc doesn't have i1 sign extending load
1701 for (MVT VT : MVT::integer_valuetypes())
1702 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1703
1704 // Turn FP truncstore into trunc + store.
1705 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1706 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1707 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1708 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
1709 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1710 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1711
1712  // Custom legalize GlobalAddress nodes into LO/HI parts.
1713  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
1714  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
1715  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
1716  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
1717
1718  // Sparc doesn't have sext_inreg, replace them with shl/sra
1719  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1720  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
1721  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
1722
1723  // Sparc has no REM or DIVREM operations.
1724  setOperationAction(ISD::UREM, MVT::i32, Expand);
1725  setOperationAction(ISD::SREM, MVT::i32, Expand);
1726  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1727  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1728
1729  // ... nor does SparcV9.
1730  if (Subtarget->is64Bit()) {
1731  setOperationAction(ISD::UREM, MVT::i64, Expand);
1732  setOperationAction(ISD::SREM, MVT::i64, Expand);
1733  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1734  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1735  }
1736
1737  // Custom expand fp<->sint
1738  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1739  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1740  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
1741  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
1742
1743  // Custom expand fp<->uint
1744  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1745  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1746  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
1747  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
1748
1749 // Lower f16 conversion operations into library calls
1750 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1751 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1752 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1753 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1754 setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
1755 setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
1756
1757 setOperationAction(ISD::BITCAST, MVT::f32,
1758 Subtarget->isVIS3() ? Legal : Expand);
1759 setOperationAction(ISD::BITCAST, MVT::i32,
1760 Subtarget->isVIS3() ? Legal : Expand);
1761
1762  // Sparc has no select or setcc: expand to SELECT_CC.
1763  setOperationAction(ISD::SELECT, MVT::i32, Expand);
1764  setOperationAction(ISD::SELECT, MVT::f32, Expand);
1765  setOperationAction(ISD::SELECT, MVT::f64, Expand);
1766  setOperationAction(ISD::SELECT, MVT::f128, Expand);
1767
1768  setOperationAction(ISD::SETCC, MVT::i32, Expand);
1769  setOperationAction(ISD::SETCC, MVT::f32, Expand);
1770  setOperationAction(ISD::SETCC, MVT::f64, Expand);
1771  setOperationAction(ISD::SETCC, MVT::f128, Expand);
1772
1773 // Sparc doesn't have BRCOND either, it has BR_CC.
1774 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1775 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1776 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1777 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1778 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1779 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1780 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
1781
1782  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1783  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1784  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1785  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1786
1791
1792 if (Subtarget->isVIS3()) {
1795 }
1796
1797 if (Subtarget->is64Bit()) {
1798 setOperationAction(ISD::BITCAST, MVT::f64,
1799 Subtarget->isVIS3() ? Legal : Expand);
1800 setOperationAction(ISD::BITCAST, MVT::i64,
1801 Subtarget->isVIS3() ? Legal : Expand);
1804 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
1806
1807    setOperationAction(ISD::CTPOP, MVT::i64,
1808                       Subtarget->usePopc() ? Legal : Expand);
1810 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1811 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1812 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
1813 }
1814
1815 // ATOMICs.
1816 // Atomics are supported on SparcV9. 32-bit atomics are also
1817 // supported by some Leon SparcV8 variants. Otherwise, atomics
1818 // are unsupported.
1819 if (Subtarget->isV9()) {
1820 // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
1821 // but it hasn't been implemented in the backend yet.
1822    if (Subtarget->is64Bit())
1823      setMaxAtomicSizeInBitsSupported(64);
1824    else
1825      setMaxAtomicSizeInBitsSupported(32);
1826  } else if (Subtarget->hasLeonCasa())
1827    setMaxAtomicSizeInBitsSupported(32);
1828  else
1829    setMaxAtomicSizeInBitsSupported(0);
1830
1831  setMinCmpXchgSizeInBits(32);
1832
1833 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
1834
1835 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
1836
1837 // Custom Lower Atomic LOAD/STORE
1838 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1839 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1840
1841 if (Subtarget->is64Bit()) {
1842 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
1843 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
1844 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
1845 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
1846 }
1847
1848 if (!Subtarget->isV9()) {
1849 // SparcV8 does not have FNEGD and FABSD.
1850 setOperationAction(ISD::FNEG, MVT::f64, Custom);
1851 setOperationAction(ISD::FABS, MVT::f64, Custom);
1852 }
1853
1854 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1855 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1856 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
1857 setOperationAction(ISD::FREM , MVT::f128, Expand);
1858 setOperationAction(ISD::FMA , MVT::f128, Expand);
1859 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1860 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1861 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1862 setOperationAction(ISD::FREM , MVT::f64, Expand);
1863 setOperationAction(ISD::FMA, MVT::f64,
1864 Subtarget->isUA2007() ? Legal : Expand);
1865 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1866 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1867 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1868 setOperationAction(ISD::FREM , MVT::f32, Expand);
1869 setOperationAction(ISD::FMA, MVT::f32,
1870 Subtarget->isUA2007() ? Legal : Expand);
1871 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1872 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1877 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1878 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1879 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1880
1884
1885 // Expands to [SU]MUL_LOHI.
1889
1890 if (Subtarget->useSoftMulDiv()) {
1891 // .umul works for both signed and unsigned
1896 }
1897
1898 if (Subtarget->is64Bit()) {
1902 Subtarget->isVIS3() ? Legal : Expand);
1904 Subtarget->isVIS3() ? Legal : Expand);
1905
1909 }
1910
1911 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1912 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1913 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1914 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1915
1916 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1917 setOperationAction(ISD::DEBUGTRAP , MVT::Other, Legal);
1918
1919 // Use the default implementation.
1920 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1921 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1922 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1923 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1924 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
1925
1926  setStackPointerRegisterToSaveRestore(SP::O6);
1927
1928  setOperationAction(ISD::CTPOP, MVT::i32,
1929                     Subtarget->usePopc() ? Legal : Expand);
1930
1931 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1932 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1933 setOperationAction(ISD::STORE, MVT::f128, Legal);
1934 } else {
1935 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1936 setOperationAction(ISD::STORE, MVT::f128, Custom);
1937 }
1938
1939 if (Subtarget->hasHardQuad()) {
1940 setOperationAction(ISD::FADD, MVT::f128, Legal);
1941 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1942 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1943 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1944 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1945    setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1946    setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1947    if (Subtarget->isV9()) {
1948 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1949 setOperationAction(ISD::FABS, MVT::f128, Legal);
1950 } else {
1951 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1952 setOperationAction(ISD::FABS, MVT::f128, Custom);
1953 }
1954 } else {
1955 // Custom legalize f128 operations.
1956
1957 setOperationAction(ISD::FADD, MVT::f128, Custom);
1958 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1959 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1960 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1961 setOperationAction(ISD::FSQRT, MVT::f128, Custom);
1962 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1963 setOperationAction(ISD::FABS, MVT::f128, Custom);
1964
1965    setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
1966    setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
1967    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
1968  }
1969
1970 if (Subtarget->fixAllFDIVSQRT()) {
1971 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1972    // the former instructions generate errata on LEON processors.
1973    setOperationAction(ISD::FDIV, MVT::f32, Promote);
1974    setOperationAction(ISD::FSQRT, MVT::f32, Promote);
1975 }
1976
1977  if (Subtarget->hasNoFMULS()) {
1978    setOperationAction(ISD::FMUL, MVT::f32, Promote);
1979  }
1980
1981 // Custom combine bitcast between f64 and v2i32
1982 if (!Subtarget->is64Bit())
1983 setTargetDAGCombine(ISD::BITCAST);
1984
1985 if (Subtarget->hasLeonCycleCounter())
1986 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1987
1988 if (Subtarget->isVIS3()) {
1993
1994 setOperationAction(ISD::CTTZ, MVT::i32,
1995 Subtarget->is64Bit() ? Promote : Expand);
1998 Subtarget->is64Bit() ? Promote : Expand);
2000 } else if (Subtarget->usePopc()) {
2005
2010 } else {
2014 Subtarget->is64Bit() ? Promote : LibCall);
2016
2017 // FIXME here we don't have any ISA extensions that could help us, so to
2018 // prevent large expansions those should be made into LibCalls.
2023 }
2024
2026
2027 // Some processors have no branch predictor and have pipelines longer than
2028 // what can be covered by the delay slot. This results in a stall, so mark
2029 // branches to be expensive on those processors.
2030 setJumpIsExpensive(Subtarget->hasNoPredictor());
2031 // The high cost of branching means that using conditional moves will
2032 // still be profitable even if the condition is predictable.
2034
2036
2037 computeRegisterProperties(Subtarget->getRegisterInfo());
2038}
2039
2040bool SparcTargetLowering::useSoftFloat() const {
2041  return Subtarget->useSoftFloat();
2042}
2043
2044EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
2045                                            EVT VT) const {
2046  if (!VT.isVector())
2047    return MVT::i32;
2048  return VT.changeVectorElementTypeToInteger();
2049}
2050
2051/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
2052/// be zero. Op is expected to be a target specific node. Used by DAG
2053/// combiner.
2054void SparcTargetLowering::computeKnownBitsForTargetNode
2055                                (const SDValue Op,
2056 KnownBits &Known,
2057 const APInt &DemandedElts,
2058 const SelectionDAG &DAG,
2059 unsigned Depth) const {
2060 KnownBits Known2;
2061 Known.resetAll();
2062
2063 switch (Op.getOpcode()) {
2064 default: break;
2065 case SPISD::SELECT_ICC:
2066 case SPISD::SELECT_XCC:
2067 case SPISD::SELECT_FCC:
2068 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
2069 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
2070
2071 // Only known if known in both the LHS and RHS.
2072 Known = Known.intersectWith(Known2);
2073 break;
2074 }
2075}
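// A small worked example of the intersection above (illustrative values):
// if the true arm is the i32 constant 1 (Known.Zero = 0xfffffffe,
// Known.One = 0x1) and the false arm is the constant 0
// (Known.Zero = 0xffffffff), the intersection leaves bits 31..1 known
// zero, so the combiner can drop a later (and X, 1) of the select result.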
2076
2077// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
2078// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
2079static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
2080                             ISD::CondCode CC, unsigned &SPCC) {
2081 if (isNullConstant(RHS) && CC == ISD::SETNE &&
2082 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2083 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2084 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2085 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2086 (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2087 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2088 isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
2089 SDValue CMPCC = LHS.getOperand(3);
2090 SPCC = LHS.getConstantOperandVal(2);
2091 LHS = CMPCC.getOperand(0);
2092 RHS = CMPCC.getOperand(1);
2093 }
2094}
2095
2096// Convert to a target node and set target flags.
2097SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
2098                                             SelectionDAG &DAG) const {
2099  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
2100    return DAG.getTargetGlobalAddress(GA->getGlobal(),
2101 SDLoc(GA),
2102 GA->getValueType(0),
2103 GA->getOffset(), TF);
2104
2105  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
2106    return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2107 CP->getAlign(), CP->getOffset(), TF);
2108
2109  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
2110    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2111 Op.getValueType(),
2112 0,
2113 TF);
2114
2115  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
2116    return DAG.getTargetExternalSymbol(ES->getSymbol(),
2117 ES->getValueType(0), TF);
2118
2119 llvm_unreachable("Unhandled address SDNode");
2120}
2121
2122// Split Op into high and low parts according to HiTF and LoTF.
2123// Return an ADD node combining the parts.
2124SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
2125                                          unsigned HiTF, unsigned LoTF,
2126 SelectionDAG &DAG) const {
2127 SDLoc DL(Op);
2128 EVT VT = Op.getValueType();
2129 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2130 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2131 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2132}
2133
2134// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2135// or ExternalSymbol SDNode.
2136SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
2137  SDLoc DL(Op);
2138 EVT VT = getPointerTy(DAG.getDataLayout());
2139
2140 // Handle PIC mode first. SPARC needs a got load for every variable!
2141 if (isPositionIndependent()) {
2142 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2143 PICLevel::Level picLevel = M->getPICLevel();
2144 SDValue Idx;
2145
2146 if (picLevel == PICLevel::SmallPIC) {
2147 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2148 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2149 withTargetFlags(Op, ELF::R_SPARC_GOT13, DAG));
2150 } else {
2151 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2152 Idx = makeHiLoPair(Op, ELF::R_SPARC_GOT22, ELF::R_SPARC_GOT10, DAG);
2153 }
2154
2155 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2156 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2157 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2158 // function has calls.
2159    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2160    MFI.setHasCalls(true);
2161 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2162                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2163  }
2164
2165 // This is one of the absolute code models.
2166 switch(getTargetMachine().getCodeModel()) {
2167 default:
2168 llvm_unreachable("Unsupported absolute code model");
2169 case CodeModel::Small:
2170 // abs32.
2171 return makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2172 case CodeModel::Medium: {
2173 // abs44.
2174 SDValue H44 = makeHiLoPair(Op, ELF::R_SPARC_H44, ELF::R_SPARC_M44, DAG);
2175 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2176 SDValue L44 = withTargetFlags(Op, ELF::R_SPARC_L44, DAG);
2177 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2178 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2179 }
2180 case CodeModel::Large: {
2181 // abs64.
2182 SDValue Hi = makeHiLoPair(Op, ELF::R_SPARC_HH22, ELF::R_SPARC_HM10, DAG);
2183 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2184 SDValue Lo = makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2185 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2186 }
2187 }
2188}
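// For reference, the absolute code models above correspond roughly to the
// following instruction sequences (registers shown are illustrative):
//   abs32:  sethi %hi(sym), %o0
//           or    %o0, %lo(sym), %o0
//   abs44:  sethi %h44(sym), %o0
//           or    %o0, %m44(sym), %o0
//           sllx  %o0, 12, %o0
//           or    %o0, %l44(sym), %o0
// abs64 builds the high and low 32-bit halves as in abs32 and combines them
// with a 32-bit shift and an add.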
2189
2190SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
2191                                                SelectionDAG &DAG) const {
2192  return makeAddress(Op, DAG);
2193}
2194
2195SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
2196                                               SelectionDAG &DAG) const {
2197  return makeAddress(Op, DAG);
2198}
2199
2200SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
2201                                               SelectionDAG &DAG) const {
2202  return makeAddress(Op, DAG);
2203}
2204
2205SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2206                                                   SelectionDAG &DAG) const {
2207
2208  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2209  if (DAG.getTarget().useEmulatedTLS())
2210 return LowerToTLSEmulatedModel(GA, DAG);
2211
2212 SDLoc DL(GA);
2213 const GlobalValue *GV = GA->getGlobal();
2214 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2215
2216  TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2217
2218 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2219 unsigned HiTF =
2220 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_HI22
2221 : ELF::R_SPARC_TLS_LDM_HI22);
2222 unsigned LoTF =
2223 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_LO10
2224 : ELF::R_SPARC_TLS_LDM_LO10);
2225 unsigned addTF =
2226 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_ADD
2227 : ELF::R_SPARC_TLS_LDM_ADD);
2228 unsigned callTF =
2229 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_CALL
2230 : ELF::R_SPARC_TLS_LDM_CALL);
2231
2232 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2233 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2234 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2235 withTargetFlags(Op, addTF, DAG));
2236
2237 SDValue Chain = DAG.getEntryNode();
2238 SDValue InGlue;
2239
2240 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2241 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
2242 InGlue = Chain.getValue(1);
2243 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2244 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2245
2246 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2247 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2248        DAG.getMachineFunction(), CallingConv::C);
2249    assert(Mask && "Missing call preserved mask for calling convention");
2250 SDValue Ops[] = {Chain,
2251 Callee,
2252 Symbol,
2253 DAG.getRegister(SP::O0, PtrVT),
2254 DAG.getRegisterMask(Mask),
2255 InGlue};
2256 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2257 InGlue = Chain.getValue(1);
2258 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
2259 InGlue = Chain.getValue(1);
2260 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
2261
2262 if (model != TLSModel::LocalDynamic)
2263 return Ret;
2264
2265 SDValue Hi =
2266 DAG.getNode(SPISD::Hi, DL, PtrVT,
2267 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_HIX22, DAG));
2268 SDValue Lo =
2269 DAG.getNode(SPISD::Lo, DL, PtrVT,
2270 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_LOX10, DAG));
2271 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2272 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2273 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_ADD, DAG));
2274 }
2275
2276 if (model == TLSModel::InitialExec) {
2277 unsigned ldTF = ((PtrVT == MVT::i64) ? ELF::R_SPARC_TLS_IE_LDX
2278 : ELF::R_SPARC_TLS_IE_LD);
2279
2280 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2281
2282 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2283 // function has calls.
2285 MFI.setHasCalls(true);
2286
2287 SDValue TGA = makeHiLoPair(Op, ELF::R_SPARC_TLS_IE_HI22,
2288 ELF::R_SPARC_TLS_IE_LO10, DAG);
2289 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2290 SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2291 DL, PtrVT, Ptr,
2292 withTargetFlags(Op, ldTF, DAG));
2293 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2294 DAG.getRegister(SP::G7, PtrVT), Offset,
2295 withTargetFlags(Op, ELF::R_SPARC_TLS_IE_ADD, DAG));
2296 }
2297
2298 assert(model == TLSModel::LocalExec);
2299 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2300 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_HIX22, DAG));
2301 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2302 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_LOX10, DAG));
2303 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2304
2305 return DAG.getNode(ISD::ADD, DL, PtrVT,
2306 DAG.getRegister(SP::G7, PtrVT), Offset);
2307}
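// As a concrete sketch, the local-exec path above corresponds roughly to:
//   sethi %tle_hix22(sym), %o0
//   xor   %o0, %tle_lox10(sym), %o0
//   add   %g7, %o0, %o0         ! %g7 holds the thread pointer
// (illustrative assembly; actual register choices are up to the allocator).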
2308
2309SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
2310                                                  ArgListTy &Args, SDValue Arg,
2311 const SDLoc &DL,
2312 SelectionDAG &DAG) const {
2313  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2314  EVT ArgVT = Arg.getValueType();
2315 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2316
2317 if (ArgTy->isFP128Ty()) {
2318 // Create a stack object and pass the pointer to the library function.
2319 int FI = MFI.CreateStackObject(16, Align(8), false);
2320 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2321 Chain = DAG.getStore(Chain, DL, Arg, FIPtr, MachinePointerInfo(), Align(8));
2322 Args.emplace_back(FIPtr, PointerType::getUnqual(ArgTy->getContext()));
2323 } else {
2324 Args.emplace_back(Arg, ArgTy);
2325 }
2326 return Chain;
2327}
2328
2329SDValue
2330SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
2331                                 const char *LibFuncName,
2332 unsigned numArgs) const {
2333
2334 ArgListTy Args;
2335
2336  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2337  auto PtrVT = getPointerTy(DAG.getDataLayout());
2338
2339 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2340 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2341 Type *RetTyABI = RetTy;
2342 SDValue Chain = DAG.getEntryNode();
2343 SDValue RetPtr;
2344
2345 if (RetTy->isFP128Ty()) {
2346 // Create a Stack Object to receive the return value of type f128.
2347 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2348 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2349 ArgListEntry Entry(RetPtr, PointerType::getUnqual(RetTy->getContext()));
2350 if (!Subtarget->is64Bit()) {
2351 Entry.IsSRet = true;
2352 Entry.IndirectType = RetTy;
2353 }
2354 Entry.IsReturned = false;
2355 Args.push_back(Entry);
2356 RetTyABI = Type::getVoidTy(*DAG.getContext());
2357 }
2358
2359 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2360 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2361 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2362 }
2363  TargetLowering::CallLoweringInfo CLI(DAG);
2364  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2365 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2366
2367 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2368
2369  // The chain is in the second result.
2370 if (RetTyABI == RetTy)
2371 return CallInfo.first;
2372
2373 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2374
2375 Chain = CallInfo.second;
2376
2377 // Load RetPtr to get the return value.
2378  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2379                     MachinePointerInfo());
2380}
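// As an example of what this produces: an f128 FADD on a 64-bit target
// becomes, roughly, a call _Qp_add(&result, &lhs, &rhs), where each f128
// value lives in a 16-byte stack slot and is passed by pointer; on 32-bit
// targets the result pointer is passed as an sret argument instead. The
// libcall name and argument layout here are a sketch of the fp128 ABI, not
// quoted from this file.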
2381
2382SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
2383                                              unsigned &SPCC, const SDLoc &DL,
2384 SelectionDAG &DAG) const {
2385
2386 const char *LibCall = nullptr;
2387 bool is64Bit = Subtarget->is64Bit();
2388 switch(SPCC) {
2389 default: llvm_unreachable("Unhandled conditional code!");
2390 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2391 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2392 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2393 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2394 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2395 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2396 case SPCC::FCC_UL :
2397 case SPCC::FCC_ULE:
2398 case SPCC::FCC_UG :
2399 case SPCC::FCC_UGE:
2400 case SPCC::FCC_U :
2401 case SPCC::FCC_O :
2402 case SPCC::FCC_LG :
2403 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2404 }
2405
2406 auto PtrVT = getPointerTy(DAG.getDataLayout());
2407 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2408 Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2409 ArgListTy Args;
2410 SDValue Chain = DAG.getEntryNode();
2411 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2412 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2413
2414  TargetLowering::CallLoweringInfo CLI(DAG);
2415  CLI.setDebugLoc(DL).setChain(Chain)
2416 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2417
2418 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2419
2420  // The result is in the first result value; the chain is in the second.
2421 SDValue Result = CallInfo.first;
2422
2423 switch(SPCC) {
2424 default: {
2425 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2426    SPCC = SPCC::ICC_NE;
2427    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2428 }
2429 case SPCC::FCC_UL : {
2430 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2431 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2432 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2433    SPCC = SPCC::ICC_NE;
2434    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2435 }
2436 case SPCC::FCC_ULE: {
2437 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2438    SPCC = SPCC::ICC_NE;
2439    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2440 }
2441 case SPCC::FCC_UG : {
2442 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2443 SPCC = SPCC::ICC_G;
2444 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2445 }
2446 case SPCC::FCC_UGE: {
2447 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2448    SPCC = SPCC::ICC_NE;
2449    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2450 }
2451
2452 case SPCC::FCC_U : {
2453 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2454 SPCC = SPCC::ICC_E;
2455 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2456 }
2457 case SPCC::FCC_O : {
2458 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2459    SPCC = SPCC::ICC_NE;
2460    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2461 }
2462 case SPCC::FCC_LG : {
2463 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2464 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2465 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2466    SPCC = SPCC::ICC_NE;
2467    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2468 }
2469 case SPCC::FCC_UE : {
2470 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2471 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2472 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2473 SPCC = SPCC::ICC_E;
2474 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2475 }
2476 }
2477}
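// The rewrites above rely on the comparison-result encoding implied by the
// code: _Q_cmp/_Qp_cmp return 0 = equal, 1 = less, 2 = greater,
// 3 = unordered. For example, FCC_LG (ordered and not equal) holds exactly
// when (result & 3) is 1 or 2, i.e. (result & 3) != 0, which is what the
// ISD::AND plus CMPICC with ICC_NE checks above.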
2478
2479static SDValue
2480LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
2481                   const SparcTargetLowering &TLI) {
2482
2483 if (Op.getOperand(0).getValueType() == MVT::f64)
2484 return TLI.LowerF128Op(Op, DAG,
2485 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2486
2487 if (Op.getOperand(0).getValueType() == MVT::f32)
2488 return TLI.LowerF128Op(Op, DAG,
2489 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2490
2491 llvm_unreachable("fpextend with non-float operand!");
2492 return SDValue();
2493}
2494
2495static SDValue
2496LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
2497                  const SparcTargetLowering &TLI) {
2498 // FP_ROUND on f64 and f32 are legal.
2499 if (Op.getOperand(0).getValueType() != MVT::f128)
2500 return Op;
2501
2502 if (Op.getValueType() == MVT::f64)
2503 return TLI.LowerF128Op(Op, DAG,
2504 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2505 if (Op.getValueType() == MVT::f32)
2506 return TLI.LowerF128Op(Op, DAG,
2507 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2508
2509 llvm_unreachable("fpround to non-float!");
2510 return SDValue();
2511}
2512
2513static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
2514                               const SparcTargetLowering &TLI,
2515 bool hasHardQuad) {
2516 SDLoc dl(Op);
2517 EVT VT = Op.getValueType();
2518 assert(VT == MVT::i32 || VT == MVT::i64);
2519
2520 // Expand f128 operations to fp128 abi calls.
2521 if (Op.getOperand(0).getValueType() == MVT::f128
2522 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2523 const char *libName = TLI.getLibcallName(VT == MVT::i32
2524 ? RTLIB::FPTOSINT_F128_I32
2525 : RTLIB::FPTOSINT_F128_I64);
2526 return TLI.LowerF128Op(Op, DAG, libName, 1);
2527 }
2528
2529 // Expand if the resulting type is illegal.
2530 if (!TLI.isTypeLegal(VT))
2531 return SDValue();
2532
2533  // Otherwise, convert the FP value to an integer in an FP register.
2534 if (VT == MVT::i32)
2535 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2536 else
2537 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2538
2539 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2540}
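// For instance, (i32 (fp_to_sint f32 %x)) with legal types becomes an
// SPISD::FTOI whose result stays in an FP register, roughly:
//   fstoi %f1, %f0    ! convert; the i32 bits are still in an FP register
// followed by whatever the BITCAST lowering picks (a store/load through
// memory, or a direct move on VIS3) to reach an integer register. This is
// a sketch, not the exact emitted sequence.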
2541
2542static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2543                               const SparcTargetLowering &TLI,
2544 bool hasHardQuad) {
2545 SDLoc dl(Op);
2546 EVT OpVT = Op.getOperand(0).getValueType();
2547 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2548
2549 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2550
2551 // Expand f128 operations to fp128 ABI calls.
2552 if (Op.getValueType() == MVT::f128
2553 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2554 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2555 ? RTLIB::SINTTOFP_I32_F128
2556 : RTLIB::SINTTOFP_I64_F128);
2557 return TLI.LowerF128Op(Op, DAG, libName, 1);
2558 }
2559
2560 // Expand if the operand type is illegal.
2561 if (!TLI.isTypeLegal(OpVT))
2562 return SDValue();
2563
2564  // Otherwise, convert the int value to FP in an FP register.
2565 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2566 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2567 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2568}
2569
2570static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
2571                               const SparcTargetLowering &TLI,
2572 bool hasHardQuad) {
2573 EVT VT = Op.getValueType();
2574
2575 // Expand if it does not involve f128 or the target has support for
2576 // quad floating point instructions and the resulting type is legal.
2577 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2578 (hasHardQuad && TLI.isTypeLegal(VT)))
2579 return SDValue();
2580
2581 assert(VT == MVT::i32 || VT == MVT::i64);
2582
2583 return TLI.LowerF128Op(Op, DAG,
2584 TLI.getLibcallName(VT == MVT::i32
2585 ? RTLIB::FPTOUINT_F128_I32
2586 : RTLIB::FPTOUINT_F128_I64),
2587 1);
2588}
2589
2590static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2591                               const SparcTargetLowering &TLI,
2592 bool hasHardQuad) {
2593 EVT OpVT = Op.getOperand(0).getValueType();
2594 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2595
2596 // Expand if it does not involve f128 or the target has support for
2597 // quad floating point instructions and the operand type is legal.
2598 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2599 return SDValue();
2600
2601 return TLI.LowerF128Op(Op, DAG,
2602 TLI.getLibcallName(OpVT == MVT::i32
2603 ? RTLIB::UINTTOFP_I32_F128
2604 : RTLIB::UINTTOFP_I64_F128),
2605 1);
2606}
2607
2608static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
2609                          const SparcTargetLowering &TLI, bool hasHardQuad,
2610 bool isV9, bool is64Bit) {
2611 SDValue Chain = Op.getOperand(0);
2612 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2613 SDValue LHS = Op.getOperand(2);
2614 SDValue RHS = Op.getOperand(3);
2615 SDValue Dest = Op.getOperand(4);
2616 SDLoc dl(Op);
2617 unsigned Opc, SPCC = ~0U;
2618
2619 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2620  // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2621  LookThroughSetCC(LHS, RHS, CC, SPCC);
2622  assert(LHS.getValueType() == RHS.getValueType());
2623
2624 // Get the condition flag.
2625 SDValue CompareFlag;
2626 if (LHS.getValueType().isInteger()) {
2627 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2628 // and the RHS is zero we might be able to use a specialized branch.
2629 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2630        isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC))
2631      return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
2632 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
2633 LHS);
2634
2635 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2636 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2637 if (isV9)
2638 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2639 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2640 else
2641 // Non-v9 targets don't have xcc.
2642 Opc = SPISD::BRICC;
2643 } else {
2644 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2645 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2646 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2647 Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2648 } else {
2649 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2650 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2651 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2652 Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2653 }
2654 }
2655 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2656 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2657}
2658
2659static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2660                              const SparcTargetLowering &TLI, bool hasHardQuad,
2661 bool isV9, bool is64Bit) {
2662 SDValue LHS = Op.getOperand(0);
2663 SDValue RHS = Op.getOperand(1);
2664 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2665 SDValue TrueVal = Op.getOperand(2);
2666 SDValue FalseVal = Op.getOperand(3);
2667 SDLoc dl(Op);
2668 unsigned Opc, SPCC = ~0U;
2669
2670 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2671  // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2672  LookThroughSetCC(LHS, RHS, CC, SPCC);
2673  assert(LHS.getValueType() == RHS.getValueType());
2674
2675 SDValue CompareFlag;
2676 if (LHS.getValueType().isInteger()) {
2677 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2678 // and the RHS is zero we might be able to use a specialized select.
2679 // All SELECT_CC between any two scalar integer types are eligible for
2680 // lowering to specialized instructions. Additionally, f32 and f64 types
2681 // are also eligible, but for f128 we can only use the specialized
2682 // instruction when we have hardquad.
2683 EVT ValType = TrueVal.getValueType();
2684 bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
2685 ValType == MVT::f64 ||
2686 (ValType == MVT::f128 && hasHardQuad);
2687 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2688 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
2689 return DAG.getNode(
2690 SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2691 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
2692
2693 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2694 Opc = LHS.getValueType() == MVT::i32 ?
2695 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2696 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2697 } else {
2698 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2699 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2700 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2701 Opc = SPISD::SELECT_ICC;
2702 } else {
2703 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2704 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2705 Opc = SPISD::SELECT_FCC;
2706 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2707 }
2708 }
2709 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2710 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2711}
2712
2713static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
2714                            const SparcTargetLowering &TLI) {
2715  MachineFunction &MF = DAG.getMachineFunction();
2716  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
2717  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2718
2719 // Need frame address to find the address of VarArgsFrameIndex.
2720  MF.getFrameInfo().setFrameAddressIsTaken(true);
2721
2722 // vastart just stores the address of the VarArgsFrameIndex slot into the
2723 // memory location argument.
2724 SDLoc DL(Op);
2725 SDValue Offset =
2726 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2727 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2728 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2729 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2730 MachinePointerInfo(SV));
2731}
2732
2733static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
2734  SDNode *Node = Op.getNode();
2735 EVT VT = Node->getValueType(0);
2736 SDValue InChain = Node->getOperand(0);
2737 SDValue VAListPtr = Node->getOperand(1);
2738 EVT PtrVT = VAListPtr.getValueType();
2739 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2740 SDLoc DL(Node);
2741 SDValue VAList =
2742 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2743 // Increment the pointer, VAList, to the next vaarg.
2744 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2745                                DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2746                                                      DL));
2747 // Store the incremented VAList to the legalized pointer.
2748 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2749 MachinePointerInfo(SV));
2750 // Load the actual argument out of the pointer VAList.
2751 // We can't count on greater alignment than the word size.
2752 return DAG.getLoad(
2753 VT, DL, InChain, VAList, MachinePointerInfo(),
2754 Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
2755}
2756
2757static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
2758                                       const SparcSubtarget *Subtarget) {
2759 SDValue Chain = Op.getOperand(0);
2760 SDValue Size = Op.getOperand(1);
2761 SDValue Alignment = Op.getOperand(2);
2762 MaybeAlign MaybeAlignment =
2763 cast<ConstantSDNode>(Alignment)->getMaybeAlignValue();
2764 EVT VT = Size->getValueType(0);
2765 SDLoc dl(Op);
2766
2767 unsigned SPReg = SP::O6;
2768 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2769
2770 // The resultant pointer needs to be above the register spill area
2771 // at the bottom of the stack.
2772 unsigned regSpillArea;
2773 if (Subtarget->is64Bit()) {
2774 regSpillArea = 128;
2775 } else {
2776 // On Sparc32, the size of the spill area is 92. Unfortunately,
2777 // that's only 4-byte aligned, not 8-byte aligned (the stack
2778 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2779 // aligned dynamic allocation, we actually need to add 96 to the
2780 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2781
2782    // That also means adding 4 to the size of the allocation --
2783    // before applying the 8-byte rounding. Unfortunately, the
2784    // value we get here has already had rounding applied. So, we need
2785    // to add 8 instead, wasting a bit more memory.
2786
2787 // Further, this only actually needs to be done if the required
2788 // alignment is > 4, but, we've lost that info by this point, too,
2789 // so we always apply it.
2790
2791 // (An alternative approach would be to always reserve 96 bytes
2792 // instead of the required 92, but then we'd waste 4 extra bytes
2793 // in every frame, not just those with dynamic stack allocations)
2794
2795 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2796
2797 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2798 DAG.getConstant(8, dl, VT));
2799 regSpillArea = 96;
2800 }
2801
2802 int64_t Bias = Subtarget->getStackPointerBias();
2803
2804 // Debias and increment SP past the reserved spill area.
2805 // We need the SP to point to the first usable region before calculating
2806 // anything to prevent any of the pointers from becoming out of alignment when
2807 // we rebias the SP later on.
2808 SDValue StartOfUsableStack = DAG.getNode(
2809 ISD::ADD, dl, VT, SP, DAG.getConstant(regSpillArea + Bias, dl, VT));
2810 SDValue AllocatedPtr =
2811 DAG.getNode(ISD::SUB, dl, VT, StartOfUsableStack, Size);
2812
2813 bool IsOveraligned = MaybeAlignment.has_value();
2814 SDValue AlignedPtr =
2815 IsOveraligned
2816 ? DAG.getNode(ISD::AND, dl, VT, AllocatedPtr,
2817 DAG.getSignedConstant(-MaybeAlignment->value(), dl, VT))
2818 : AllocatedPtr;
2819
2820 // Now that we are done, restore the bias and reserved spill area.
2821 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, AlignedPtr,
2822 DAG.getConstant(regSpillArea + Bias, dl, VT));
2823 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);
2824 SDValue Ops[2] = {AlignedPtr, Chain};
2825 return DAG.getMergeValues(Ops, dl);
2826}
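// Worked example on Sparc32 (stack bias 0): for a dynamic allocation of 16
// bytes with no over-alignment, Size is first padded to 24, and then:
//   StartOfUsableStack = SP + 96
//   AllocatedPtr       = SP + 96 - 24
//   NewSP              = AllocatedPtr - 96 = SP - 24
// so the returned pointer always sits regSpillArea bytes above the new SP,
// clear of the register spill area. (Numbers here are an illustration of
// the arithmetic above, not output from the compiler.)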
2827
2828
2829static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
2830  SDLoc dl(Op);
2831 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2832 dl, MVT::Other, DAG.getEntryNode());
2833 return Chain;
2834}
2835
2836static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2837                            const SparcSubtarget *Subtarget,
2838                            bool AlwaysFlush = false) {
2839  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2840  MFI.setFrameAddressIsTaken(true);
2841
2842 EVT VT = Op.getValueType();
2843 SDLoc dl(Op);
2844 unsigned FrameReg = SP::I6;
2845 unsigned stackBias = Subtarget->getStackPointerBias();
2846
2847 SDValue FrameAddr;
2848 SDValue Chain;
2849
2850  // Flush first to make sure the windowed registers' values are on the stack.
2851 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2852
2853 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2854
2855 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2856
2857 while (depth--) {
2858 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2859 DAG.getIntPtrConstant(Offset, dl));
2860 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2861 }
2862 if (Subtarget->is64Bit())
2863 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2864 DAG.getIntPtrConstant(stackBias, dl));
2865 return FrameAddr;
2866}
2867
2868
2869static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
2870                              const SparcSubtarget *Subtarget) {
2871
2872 uint64_t depth = Op.getConstantOperandVal(0);
2873
2874 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2875
2876}
2877
2878static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
2879                               const SparcTargetLowering &TLI,
2880                               const SparcSubtarget *Subtarget) {
2881  MachineFunction &MF = DAG.getMachineFunction();
2882  MachineFrameInfo &MFI = MF.getFrameInfo();
2883 MFI.setReturnAddressIsTaken(true);
2884
2885 EVT VT = Op.getValueType();
2886 SDLoc dl(Op);
2887 uint64_t depth = Op.getConstantOperandVal(0);
2888
2889 SDValue RetAddr;
2890 if (depth == 0) {
2891 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2892 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2893 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2894 return RetAddr;
2895 }
2896
2897 // Need frame address to find return address of the caller.
2898 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2899
2900 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2901 SDValue Ptr = DAG.getNode(ISD::ADD,
2902 dl, VT,
2903 FrameAddr,
2904 DAG.getIntPtrConstant(Offset, dl));
2905 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2906
2907 return RetAddr;
2908}
2909
2910static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2911 unsigned opcode) {
2912 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2913 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2914
2915 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2916 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2917 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2918
2919  // Note: in little-endian, the two halves of the floating-point value
2920  // are stored in the registers in the opposite order, so the subreg
2921  // with the sign bit is the highest-numbered (odd), rather than the
2922  // lowest-numbered (even).
2923
2924 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2925 SrcReg64);
2926 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2927 SrcReg64);
2928
2929 if (DAG.getDataLayout().isLittleEndian())
2930 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2931 else
2932 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2933
2934 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2935 dl, MVT::f64), 0);
2936 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2937 DstReg64, Hi32);
2938 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2939 DstReg64, Lo32);
2940 return DstReg64;
2941}
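// For example, on a big-endian subtarget (fneg f64 %f2) becomes roughly:
//   fnegs %f2, %f4    ! the even subreg holds the word with the sign bit
//   fmovs %f3, %f5
// i.e. only the half containing the sign bit is negated; the other half is
// copied unchanged (registers shown are illustrative).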
2942
2943// Lower a f128 load into two f64 loads.
2944static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
2945{
2946 SDLoc dl(Op);
2947 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2948 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2949
2950 Align Alignment = commonAlignment(LdNode->getBaseAlign(), 8);
2951
2952 SDValue Hi64 =
2953 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2954 LdNode->getPointerInfo(), Alignment);
2955 EVT addrVT = LdNode->getBasePtr().getValueType();
2956 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2957 LdNode->getBasePtr(),
2958 DAG.getConstant(8, dl, addrVT));
2959 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2960 LdNode->getPointerInfo().getWithOffset(8),
2961 Alignment);
2962
2963 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2964 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2965
2966 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2967 dl, MVT::f128);
2968 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2969 MVT::f128,
2970 SDValue(InFP128, 0),
2971 Hi64,
2972 SubRegEven);
2973 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2974 MVT::f128,
2975 SDValue(InFP128, 0),
2976 Lo64,
2977 SubRegOdd);
2978 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2979 SDValue(Lo64.getNode(), 1) };
2980 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2981 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2982 return DAG.getMergeValues(Ops, dl);
2983}
2984
2985static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
2986{
2987 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2988
2989 EVT MemVT = LdNode->getMemoryVT();
2990 if (MemVT == MVT::f128)
2991 return LowerF128Load(Op, DAG);
2992
2993 return Op;
2994}
2995
2996// Lower a f128 store into two f64 stores.
2997static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
2998  SDLoc dl(Op);
2999 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
3000 assert(StNode->getOffset().isUndef() && "Unexpected node type");
3001
3002 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
3003 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
3004
3005 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3006 dl,
3007 MVT::f64,
3008 StNode->getValue(),
3009 SubRegEven);
3010 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3011 dl,
3012 MVT::f64,
3013 StNode->getValue(),
3014 SubRegOdd);
3015
3016 Align Alignment = commonAlignment(StNode->getBaseAlign(), 8);
3017
3018 SDValue OutChains[2];
3019 OutChains[0] =
3020 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
3021 StNode->getBasePtr(), StNode->getPointerInfo(),
3022 Alignment);
3023 EVT addrVT = StNode->getBasePtr().getValueType();
3024 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
3025 StNode->getBasePtr(),
3026 DAG.getConstant(8, dl, addrVT));
3027 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
3028 StNode->getPointerInfo().getWithOffset(8),
3029 Alignment);
3030 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3031}
3032
3033static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
3034{
3035 SDLoc dl(Op);
3036 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
3037
3038 EVT MemVT = St->getMemoryVT();
3039 if (MemVT == MVT::f128)
3040 return LowerF128Store(Op, DAG);
3041
3042 if (MemVT == MVT::i64) {
3043 // Custom handling for i64 stores: turn it into a bitcast and a
3044 // v2i32 store.
3045 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
3046 SDValue Chain = DAG.getStore(
3047 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
3048 St->getBaseAlign(), St->getMemOperand()->getFlags(), St->getAAInfo());
3049 return Chain;
3050 }
3051
3052 return SDValue();
3053}
3054
3055static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
3056  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
3057 && "invalid opcode");
3058
3059 SDLoc dl(Op);
3060
3061 if (Op.getValueType() == MVT::f64)
3062 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
3063 if (Op.getValueType() != MVT::f128)
3064 return Op;
3065
3066 // Lower fabs/fneg on f128 to fabs/fneg on f64
3067 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
3068 // (As with LowerF64Op, on little-endian, we need to negate the odd
3069 // subreg)
3070
3071 SDValue SrcReg128 = Op.getOperand(0);
3072 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
3073 SrcReg128);
3074 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3075 SrcReg128);
3076
3077 if (DAG.getDataLayout().isLittleEndian()) {
3078 if (isV9)
3079 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3080 else
3081 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3082 } else {
3083 if (isV9)
3084 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3085 else
3086 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3087 }
3088
3089 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3090 dl, MVT::f128), 0);
3091 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3092 DstReg128, Hi64);
3093 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3094 DstReg128, Lo64);
3095 return DstReg128;
3096}
3097
3098static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
3099  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3100 // Expand with a fence.
3101 return SDValue();
3102 }
3103
3104 // Monotonic load/stores are legal.
3105 return Op;
3106}
3107
3108SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3109                                                     SelectionDAG &DAG) const {
3110 unsigned IntNo = Op.getConstantOperandVal(0);
3111 switch (IntNo) {
3112 default: return SDValue(); // Don't custom lower most intrinsics.
3113 case Intrinsic::thread_pointer: {
3114 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3115 return DAG.getRegister(SP::G7, PtrVT);
3116 }
3117 }
3118}
3119
3120SDValue SparcTargetLowering::
3121LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3122
3123 bool hasHardQuad = Subtarget->hasHardQuad();
3124 bool isV9 = Subtarget->isV9();
3125 bool is64Bit = Subtarget->is64Bit();
3126
3127 switch (Op.getOpcode()) {
3128 default: llvm_unreachable("Should not custom lower this!");
3129
3130 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3131 Subtarget);
3132 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3133 Subtarget);
3134  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
3135  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
3136 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3137 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3138 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3139 hasHardQuad);
3140 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3141 hasHardQuad);
3142 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3143 hasHardQuad);
3144 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3145 hasHardQuad);
3146 case ISD::BR_CC:
3147 return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3148 case ISD::SELECT_CC:
3149 return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3150 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3151 case ISD::VAARG: return LowerVAARG(Op, DAG);
3152 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3153 Subtarget);
3154
3155 case ISD::LOAD: return LowerLOAD(Op, DAG);
3156 case ISD::STORE: return LowerSTORE(Op, DAG);
3157 case ISD::FADD: return LowerF128Op(Op, DAG,
3158 getLibcallName(RTLIB::ADD_F128), 2);
3159 case ISD::FSUB: return LowerF128Op(Op, DAG,
3160 getLibcallName(RTLIB::SUB_F128), 2);
3161 case ISD::FMUL: return LowerF128Op(Op, DAG,
3162 getLibcallName(RTLIB::MUL_F128), 2);
3163 case ISD::FDIV: return LowerF128Op(Op, DAG,
3164 getLibcallName(RTLIB::DIV_F128), 2);
3165 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3166 getLibcallName(RTLIB::SQRT_F128),1);
3167 case ISD::FABS:
3168 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3169 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3170 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3171 case ISD::ATOMIC_LOAD:
3172 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3173  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3174  }
3175}
3176
3177SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
3178                                                    const SDLoc &DL,
3179 SelectionDAG &DAG) const {
3180 APInt V = C->getValueAPF().bitcastToAPInt();
3181 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3182 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3183 if (DAG.getDataLayout().isLittleEndian())
3184 std::swap(Lo, Hi);
3185 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3186}
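// For example, bitcasting the f64 constant 1.0 (bit pattern
// 0x3ff0000000000000) produces Hi = 0x3ff00000 and Lo = 0x0, i.e.
// (v2i32 build_vector 0x3ff00000, 0x0) on a big-endian subtarget, with the
// two elements swapped on little-endian.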
3187
3188SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
3189                                                   DAGCombinerInfo &DCI) const {
3190 SDLoc dl(N);
3191 SDValue Src = N->getOperand(0);
3192
3193 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3194 Src.getSimpleValueType() == MVT::f64)
3195    return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3196
3197 return SDValue();
3198}
3199
3200SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
3201                                               DAGCombinerInfo &DCI) const {
3202 switch (N->getOpcode()) {
3203 default:
3204 break;
3205 case ISD::BITCAST:
3206 return PerformBITCASTCombine(N, DCI);
3207 }
3208 return SDValue();
3209}
3210
3211MachineBasicBlock *
3212SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
3213                                                 MachineBasicBlock *BB) const {
3214 switch (MI.getOpcode()) {
3215 default: llvm_unreachable("Unknown SELECT_CC!");
3216 case SP::SELECT_CC_Int_ICC:
3217 case SP::SELECT_CC_FP_ICC:
3218 case SP::SELECT_CC_DFP_ICC:
3219 case SP::SELECT_CC_QFP_ICC:
3220 if (Subtarget->isV9())
3221 return expandSelectCC(MI, BB, SP::BPICC);
3222 return expandSelectCC(MI, BB, SP::BCOND);
3223 case SP::SELECT_CC_Int_XCC:
3224 case SP::SELECT_CC_FP_XCC:
3225 case SP::SELECT_CC_DFP_XCC:
3226 case SP::SELECT_CC_QFP_XCC:
3227 return expandSelectCC(MI, BB, SP::BPXCC);
3228 case SP::SELECT_CC_Int_FCC:
3229 case SP::SELECT_CC_FP_FCC:
3230 case SP::SELECT_CC_DFP_FCC:
3231 case SP::SELECT_CC_QFP_FCC:
3232 if (Subtarget->isV9())
3233 return expandSelectCC(MI, BB, SP::FBCOND_V9);
3234 return expandSelectCC(MI, BB, SP::FBCOND);
3235 }
3236}
3237
3238MachineBasicBlock *
3239SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
3240                                    unsigned BROpcode) const {
3241 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3242 DebugLoc dl = MI.getDebugLoc();
3243 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3244
3245 // To "insert" a SELECT_CC instruction, we actually have to insert the
3246 // triangle control-flow pattern. The incoming instruction knows the
3247 // destination vreg to set, the condition code register to branch on, the
3248 // true/false values to select between, and the condition code for the branch.
3249 //
3250 // We produce the following control flow:
3251 // ThisMBB
3252 // | \
3253 // | IfFalseMBB
3254 // | /
3255 // SinkMBB
3256 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3257  MachineFunction::iterator It = ++BB->getIterator();
3258
3259 MachineBasicBlock *ThisMBB = BB;
3260 MachineFunction *F = BB->getParent();
3261 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3262 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3263 F->insert(It, IfFalseMBB);
3264 F->insert(It, SinkMBB);
3265
3266 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3267 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3268 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3269 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3270
3271 // Set the new successors for ThisMBB.
3272 ThisMBB->addSuccessor(IfFalseMBB);
3273 ThisMBB->addSuccessor(SinkMBB);
3274
3275 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3276 .addMBB(SinkMBB)
3277 .addImm(CC);
3278
3279 // IfFalseMBB just falls through to SinkMBB.
3280 IfFalseMBB->addSuccessor(SinkMBB);
3281
3282 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3283 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3284 MI.getOperand(0).getReg())
3285 .addReg(MI.getOperand(1).getReg())
3286 .addMBB(ThisMBB)
3287 .addReg(MI.getOperand(2).getReg())
3288 .addMBB(IfFalseMBB);
3289
3290 MI.eraseFromParent(); // The pseudo instruction is gone now.
3291 return SinkMBB;
3292}
3293
3294//===----------------------------------------------------------------------===//
3295// Sparc Inline Assembly Support
3296//===----------------------------------------------------------------------===//
3297
3298/// getConstraintType - Given a constraint letter, return the type of
3299/// constraint it is for this target.
3300SparcTargetLowering::ConstraintType
3301SparcTargetLowering::getConstraintType(StringRef Constraint) const {
3302  if (Constraint.size() == 1) {
3303 switch (Constraint[0]) {
3304 default: break;
3305 case 'r':
3306 case 'f':
3307 case 'e':
3308 return C_RegisterClass;
3309 case 'I': // SIMM13
3310 return C_Immediate;
3311 }
3312 }
3313
3314 return TargetLowering::getConstraintType(Constraint);
3315}
3316
3317TargetLowering::ConstraintWeight SparcTargetLowering::
3318getSingleConstraintMatchWeight(AsmOperandInfo &info,
3319                               const char *constraint) const {
3320  ConstraintWeight weight = CW_Invalid;
3321  Value *CallOperandVal = info.CallOperandVal;
3322 // If we don't have a value, we can't do a match,
3323 // but allow it at the lowest weight.
3324 if (!CallOperandVal)
3325 return CW_Default;
3326
3327 // Look at the constraint type.
3328 switch (*constraint) {
3329 default:
3330    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3331    break;
3332 case 'I': // SIMM13
3333 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3334 if (isInt<13>(C->getSExtValue()))
3335 weight = CW_Constant;
3336 }
3337 break;
3338 }
3339 return weight;
3340}
3341
3342/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3343/// vector. If it is invalid, don't add anything to Ops.
3344void SparcTargetLowering::LowerAsmOperandForConstraint(
3345    SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3346 SelectionDAG &DAG) const {
3347 SDValue Result;
3348
3349 // Only support length 1 constraints for now.
3350 if (Constraint.size() > 1)
3351 return;
3352
3353 char ConstraintLetter = Constraint[0];
3354 switch (ConstraintLetter) {
3355 default: break;
3356 case 'I':
3357    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3358      if (isInt<13>(C->getSExtValue())) {
3359 Result = DAG.getSignedTargetConstant(C->getSExtValue(), SDLoc(Op),
3360 Op.getValueType());
3361 break;
3362 }
3363 return;
3364 }
3365 }
3366
3367 if (Result.getNode()) {
3368 Ops.push_back(Result);
3369 return;
3370 }
3371  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3372}
3373
3374std::pair<unsigned, const TargetRegisterClass *>
3375SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
3376                                                  StringRef Constraint,
3377 MVT VT) const {
3378 if (Constraint.empty())
3379 return std::make_pair(0U, nullptr);
3380
3381 if (Constraint.size() == 1) {
3382 switch (Constraint[0]) {
3383 case 'r':
3384 if (VT == MVT::v2i32)
3385 return std::make_pair(0U, &SP::IntPairRegClass);
3386 else if (Subtarget->is64Bit())
3387 return std::make_pair(0U, &SP::I64RegsRegClass);
3388 else
3389 return std::make_pair(0U, &SP::IntRegsRegClass);
3390 case 'f':
3391 if (VT == MVT::f32 || VT == MVT::i32)
3392 return std::make_pair(0U, &SP::FPRegsRegClass);
3393 else if (VT == MVT::f64 || VT == MVT::i64)
3394 return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3395 else if (VT == MVT::f128)
3396 return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3397 // This will generate an error message
3398 return std::make_pair(0U, nullptr);
3399 case 'e':
3400 if (VT == MVT::f32 || VT == MVT::i32)
3401 return std::make_pair(0U, &SP::FPRegsRegClass);
3402 else if (VT == MVT::f64 || VT == MVT::i64 )
3403 return std::make_pair(0U, &SP::DFPRegsRegClass);
3404 else if (VT == MVT::f128)
3405 return std::make_pair(0U, &SP::QFPRegsRegClass);
3406 // This will generate an error message
3407 return std::make_pair(0U, nullptr);
3408 }
3409 }
3410
3411 if (Constraint.front() != '{')
3412 return std::make_pair(0U, nullptr);
3413
3414 assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3415 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3416 if (RegName.empty())
3417 return std::make_pair(0U, nullptr);
3418
3419 unsigned long long RegNo;
3420 // Handle numbered register aliases.
3421 if (RegName[0] == 'r' &&
3422 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3423 // r0-r7 -> g0-g7
3424 // r8-r15 -> o0-o7
3425 // r16-r23 -> l0-l7
3426 // r24-r31 -> i0-i7
3427 if (RegNo > 31)
3428 return std::make_pair(0U, nullptr);
3429 const char RegTypes[] = {'g', 'o', 'l', 'i'};
3430 char RegType = RegTypes[RegNo / 8];
3431 char RegIndex = '0' + (RegNo % 8);
3432 char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3433 return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3434 }
3435
3436 // Rewrite the fN constraint according to the value type if needed.
3437 if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3438 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3439 if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3440      return getRegForInlineAsmConstraint(
3441 TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3442 } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3443      return getRegForInlineAsmConstraint(
3444 TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3445 } else {
3446 return std::make_pair(0U, nullptr);
3447 }
3448 }
3449
3450 auto ResultPair =
3451      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3452 if (!ResultPair.second)
3453 return std::make_pair(0U, nullptr);
3454
3455 // Force the use of I64Regs over IntRegs for 64-bit values.
3456 if (Subtarget->is64Bit() && VT == MVT::i64) {
3457 assert(ResultPair.second == &SP::IntRegsRegClass &&
3458 "Unexpected register class");
3459 return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3460 }
3461
3462 return ResultPair;
3463}
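// Illustrative only: under the aliasing above, "{r10}" resolves to %o2
// (r8-r15 map to o0-o7), and "{f8}" used with an f64 value is rewritten to
// "{d4}", since double registers overlay pairs of even-numbered single
// registers.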
3464
3465bool
3466SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3467 // The Sparc target isn't yet aware of offsets.
3468 return false;
3469}
3470
3471void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
3472                                             SmallVectorImpl<SDValue>& Results,
3473 SelectionDAG &DAG) const {
3474
3475 SDLoc dl(N);
3476
3477 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3478
3479 switch (N->getOpcode()) {
3480 default:
3481 llvm_unreachable("Do not know how to custom type legalize this operation!");
3482
3483 case ISD::FP_TO_SINT:
3484 case ISD::FP_TO_UINT:
3485    // Custom lower only f128 -> i64 conversions.
3486 if (N->getOperand(0).getValueType() != MVT::f128
3487 || N->getValueType(0) != MVT::i64)
3488 return;
3489 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3490 ? RTLIB::FPTOSINT_F128_I64
3491 : RTLIB::FPTOUINT_F128_I64);
3492
3493 Results.push_back(LowerF128Op(SDValue(N, 0),
3494 DAG,
3495 getLibcallName(libCall),
3496 1));
3497 return;
3498 case ISD::READCYCLECOUNTER: {
3499 assert(Subtarget->hasLeonCycleCounter());
3500 SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3501 SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3502 SDValue Ops[] = { Lo, Hi };
3503 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3504 Results.push_back(Pair);
3505 Results.push_back(N->getOperand(0));
3506 return;
3507 }
3508 case ISD::SINT_TO_FP:
3509 case ISD::UINT_TO_FP:
3510    // Custom lower only i64 -> f128 conversions.
3511 if (N->getValueType(0) != MVT::f128
3512 || N->getOperand(0).getValueType() != MVT::i64)
3513 return;
3514
3515 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3516 ? RTLIB::SINTTOFP_I64_F128
3517 : RTLIB::UINTTOFP_I64_F128);
3518
3519 Results.push_back(LowerF128Op(SDValue(N, 0),
3520 DAG,
3521 getLibcallName(libCall),
3522 1));
3523 return;
3524 case ISD::LOAD: {
3525    LoadSDNode *Ld = cast<LoadSDNode>(N);
3526    // Custom handling only for i64: turn an i64 load into a v2i32 load
3527    // followed by a bitcast.
3528 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3529 return;
3530
3531 SDLoc dl(N);
3532 SDValue LoadRes = DAG.getExtLoad(
3533 Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3534 Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getBaseAlign(),
3535 Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
3536
3537 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3538 Results.push_back(Res);
3539 Results.push_back(LoadRes.getValue(1));
3540 return;
3541 }
3542 }
3543}
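// Illustrative only: on 32-bit SPARC, where i64 is not a legal result type,
// an IR-level "%v = load i64, ptr %p" reaches the ISD::LOAD case above and is
// re-emitted as a v2i32 load of the same memory followed by a bitcast to i64.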
3544
3545// Override to enable LOAD_STACK_GUARD lowering on Linux.
3546bool SparcTargetLowering::useLoadStackGuardNode(const Module &M) const {
3547 if (!Subtarget->getTargetTriple().isOSLinux())
3548    return TargetLowering::useLoadStackGuardNode(M);
3549 return true;
3550}
3551
3552bool SparcTargetLowering::isFNegFree(EVT VT) const {
3553 if (Subtarget->isVIS3())
3554 return VT == MVT::f32 || VT == MVT::f64;
3555 return false;
3556}
3557
3558bool SparcTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3559 bool ForCodeSize) const {
3560 if (VT != MVT::f32 && VT != MVT::f64)
3561 return false;
3562 if (Subtarget->isVIS() && Imm.isZero())
3563 return true;
3564 if (Subtarget->isVIS3())
3565 return Imm.isExactlyValue(+0.5) || Imm.isExactlyValue(-0.5) ||
3566 Imm.getExactLog2Abs() == -1;
3567 return false;
3568}
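// Illustrative only: with VIS, a zero immediate is legal for f32/f64 (the
// fzeros/fzero instructions materialize it); with VIS3, +/-0.5 also qualifies
// per the checks above.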
3569
3570bool SparcTargetLowering::isCtlzFast() const { return Subtarget->isVIS3(); }
3571
3572bool SparcTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
3573  // We lack a native cttz; however,
3574  // on 64-bit targets it is cheap to implement it in terms of popc.
3575 if (Subtarget->is64Bit() && Subtarget->usePopc())
3576 return true;
3577 // Otherwise, implementing cttz in terms of ctlz is still cheap.
3578 return isCheapToSpeculateCtlz(Ty);
3579}
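// Illustrative only: the popc-based expansion uses the standard identity
// cttz(x) = popc(~x & (x - 1)), e.g. cttz(8) = popc(0b0111) = 3, so one
// population count plus two ALU ops stands in for the missing instruction.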
3580
3581bool SparcTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3582 EVT VT) const {
3583 return Subtarget->isUA2007() && !Subtarget->useSoftFloat();
3584}
3585
3586void SparcTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3587 SDNode *Node) const {
3588 assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
3589 // If the result is dead, replace it with %g0.
3590 if (!Node->hasAnyUseOfValue(0))
3591 MI.getOperand(0).setReg(SP::G0);
3592}
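// Illustrative only: a SUBCC whose integer result is unused (only its
// condition codes feed a later branch) gets %g0, the SPARC register hardwired
// to zero, as its destination, effectively turning it into a pure compare.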
3593
3594Instruction *SparcTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
3595 Instruction *Inst,
3596 AtomicOrdering Ord) const {
3597 bool HasStoreSemantics =
3598      isa<AtomicCmpXchgInst, AtomicRMWInst, StoreInst>(Inst);
3599 if (HasStoreSemantics && isReleaseOrStronger(Ord))
3600 return Builder.CreateFence(AtomicOrdering::Release);
3601 return nullptr;
3602}
3603
3604Instruction *SparcTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
3605 Instruction *Inst,
3606 AtomicOrdering Ord) const {
3607  // V8 loads already come with an implicit acquire barrier, so there's no
3608  // need to emit it again.
3609 bool HasLoadSemantics = isa<AtomicCmpXchgInst, AtomicRMWInst, LoadInst>(Inst);
3610 if (Subtarget->isV9() && HasLoadSemantics && isAcquireOrStronger(Ord))
3611 return Builder.CreateFence(AtomicOrdering::Acquire);
3612
3613  // Sequentially consistent (SC) plain stores need a trailing full barrier.
3614  if (isa<StoreInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
3615 return Builder.CreateFence(Ord);
3616 return nullptr;
3617}
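// Illustrative only: a seq_cst "store atomic i32 %v, ptr %p" is bracketed by
// "fence release" from emitLeadingFence and a full "fence seq_cst" from
// emitTrailingFence, while a seq_cst atomic load on V9 gets only a trailing
// "fence acquire".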