LLVM 17.0.0git
M68kISelLowering.cpp
Go to the documentation of this file.
1//===-- M68kISelLowering.cpp - M68k DAG Lowering Impl -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the interfaces that M68k uses to lower LLVM code into a
11/// selection DAG.
12///
13//===----------------------------------------------------------------------===//
14
15#include "M68kISelLowering.h"
16#include "M68kCallingConv.h"
17#include "M68kMachineFunction.h"
18#include "M68kSubtarget.h"
19#include "M68kTargetMachine.h"
21
22#include "llvm/ADT/Statistic.h"
31#include "llvm/IR/CallingConv.h"
35#include "llvm/Support/Debug.h"
39
40using namespace llvm;
41
42#define DEBUG_TYPE "M68k-isel"
43
44STATISTIC(NumTailCalls, "Number of tail calls");
45
                                       const M68kSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI), TM(TM) {

  // Pointers are 32 bits wide on M68k.
  MVT PtrVT = MVT::i32;


  // Save/restore the stack through the subtarget's stack pointer register.
  auto *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Set up the register classes.
  addRegisterClass(MVT::i8, &M68k::DR8RegClass);
  addRegisterClass(MVT::i16, &M68k::XR16RegClass);
  addRegisterClass(MVT::i32, &M68k::XR32RegClass);

  for (auto VT : MVT::integer_valuetypes()) {
  }

  // We don't accept any truncstore of integer registers.

  if (Subtarget.atLeastM68020())
  else

  for (auto OP :
  }

  for (auto OP : {ISD::UMUL_LOHI, ISD::SMUL_LOHI}) {
  }

  // FIXME It would be better to use a custom lowering
  for (auto OP : {ISD::SMULO, ISD::UMULO}) {
  }


  // Add/Sub overflow ops with MVT::Glues are lowered to CCR dependences.
  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
  }

  // SADDO and friends are legal with this setup, i hope
  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
  }


  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
  }

  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
  }






  // We lower the `atomic-compare-and-swap` to `__sync_val_compare_and_swap`
  // for subtarget < M68020
  Subtarget.atLeastM68020() ? Legal : LibCall);

  // M68k does not have native read-modify-write support, so expand all of them
  // to `__sync_fetch_*` for target < M68020, otherwise expand to CmpxChg.
  // See `shouldExpandAtomicRMWInIR` below.
  {
  },

  // 2^2 bytes
  // FIXME can it be just 2^1?
  setMinFunctionAlignment(Align::Constant<2>());
}
188
191 return Subtarget.atLeastM68020()
194}
195
197 LLVMContext &Context, EVT VT) const {
198 // M68k SETcc producess either 0x00 or 0xFF
199 return MVT::i8;
200}
201
203 EVT Ty) const {
204 if (Ty.isSimple()) {
205 return Ty.getSimpleVT();
206 }
207 return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
208}
209
210#include "M68kGenCallingConv.inc"
211
213
214static StructReturnType
216 if (Outs.empty())
217 return NotStructReturn;
218
219 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
220 if (!Flags.isSRet())
221 return NotStructReturn;
222 if (Flags.isInReg())
223 return RegStructReturn;
224 return StackStructReturn;
225}
226
227/// Determines whether a function uses struct return semantics.
228static StructReturnType
230 if (Ins.empty())
231 return NotStructReturn;
232
233 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
234 if (!Flags.isSRet())
235 return NotStructReturn;
236 if (Flags.isInReg())
237 return RegStructReturn;
238 return StackStructReturn;
239}
240
241/// Make a copy of an aggregate at address specified by "Src" to address
242/// "Dst" with size and alignment information specified by the specific
243/// parameter attribute. The copy will be passed as a byval function parameter.
245 SDValue Chain, ISD::ArgFlagsTy Flags,
246 SelectionDAG &DAG, const SDLoc &DL) {
247 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), DL, MVT::i32);
248
249 return DAG.getMemcpy(
250 Chain, DL, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
251 /*isVolatile=*/false, /*AlwaysInline=*/true,
252 /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
253}
254
255/// Return true if the calling convention is one that we can guarantee TCO for.
256static bool canGuaranteeTCO(CallingConv::ID CC) { return false; }
257
258/// Return true if we might ever do TCO for calls with this calling convention.
260 switch (CC) {
261 // C calling conventions:
262 case CallingConv::C:
263 return true;
264 default:
265 return canGuaranteeTCO(CC);
266 }
267}
268
269/// Return true if the function is being made into a tailcall target by
270/// changing its ABI.
271static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
272 return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
273}
274
275/// Return true if the given stack call argument is already available in the
276/// same position (relatively) of the caller's incoming argument stack.
277static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
280 const M68kInstrInfo *TII,
281 const CCValAssign &VA) {
282 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
283
284 for (;;) {
285 // Look through nodes that don't alter the bits of the incoming value.
286 unsigned Op = Arg.getOpcode();
287 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
288 Arg = Arg.getOperand(0);
289 continue;
290 }
291 if (Op == ISD::TRUNCATE) {
292 const SDValue &TruncInput = Arg.getOperand(0);
293 if (TruncInput.getOpcode() == ISD::AssertZext &&
294 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
295 Arg.getValueType()) {
296 Arg = TruncInput.getOperand(0);
297 continue;
298 }
299 }
300 break;
301 }
302
303 int FI = INT_MAX;
304 if (Arg.getOpcode() == ISD::CopyFromReg) {
305 Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
307 return false;
308 MachineInstr *Def = MRI->getVRegDef(VR);
309 if (!Def)
310 return false;
311 if (!Flags.isByVal()) {
312 if (!TII->isLoadFromStackSlot(*Def, FI))
313 return false;
314 } else {
315 unsigned Opcode = Def->getOpcode();
316 if ((Opcode == M68k::LEA32p || Opcode == M68k::LEA32f) &&
317 Def->getOperand(1).isFI()) {
318 FI = Def->getOperand(1).getIndex();
319 Bytes = Flags.getByValSize();
320 } else
321 return false;
322 }
323 } else if (auto *Ld = dyn_cast<LoadSDNode>(Arg)) {
324 if (Flags.isByVal())
325 // ByVal argument is passed in as a pointer but it's now being
326 // dereferenced. e.g.
327 // define @foo(%struct.X* %A) {
328 // tail call @bar(%struct.X* byval %A)
329 // }
330 return false;
331 SDValue Ptr = Ld->getBasePtr();
332 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
333 if (!FINode)
334 return false;
335 FI = FINode->getIndex();
336 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
337 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
338 FI = FINode->getIndex();
339 Bytes = Flags.getByValSize();
340 } else
341 return false;
342
343 assert(FI != INT_MAX);
344 if (!MFI.isFixedObjectIndex(FI))
345 return false;
346
347 if (Offset != MFI.getObjectOffset(FI))
348 return false;
349
350 if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
351 // If the argument location is wider than the argument type, check that any
352 // extension flags match.
353 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
354 Flags.isSExt() != MFI.isObjectSExt(FI)) {
355 return false;
356 }
357 }
358
359 return Bytes == MFI.getObjectSize(FI);
360}
361
363M68kTargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
366 int ReturnAddrIndex = FuncInfo->getRAIndex();
367
368 if (ReturnAddrIndex == 0) {
369 // Set up a frame object for the return address.
370 unsigned SlotSize = Subtarget.getSlotSize();
371 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(
372 SlotSize, -(int64_t)SlotSize, false);
373 FuncInfo->setRAIndex(ReturnAddrIndex);
374 }
375
376 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
377}
378
379SDValue M68kTargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
380 SDValue &OutRetAddr,
381 SDValue Chain,
382 bool IsTailCall, int FPDiff,
383 const SDLoc &DL) const {
384 EVT VT = getPointerTy(DAG.getDataLayout());
385 OutRetAddr = getReturnAddressFrameIndex(DAG);
386
387 // Load the "old" Return address.
388 OutRetAddr = DAG.getLoad(VT, DL, Chain, OutRetAddr, MachinePointerInfo());
389 return SDValue(OutRetAddr.getNode(), 1);
390}
391
392SDValue M68kTargetLowering::EmitTailCallStoreRetAddr(
393 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue RetFI,
394 EVT PtrVT, unsigned SlotSize, int FPDiff, const SDLoc &DL) const {
395 if (!FPDiff)
396 return Chain;
397
398 // Calculate the new stack slot for the return address.
399 int NewFO = MF.getFrameInfo().CreateFixedObject(
400 SlotSize, (int64_t)FPDiff - SlotSize, false);
401
402 SDValue NewFI = DAG.getFrameIndex(NewFO, PtrVT);
403 // Store the return address to the appropriate stack slot.
404 Chain = DAG.getStore(
405 Chain, DL, RetFI, NewFI,
407 return Chain;
408}
409
411M68kTargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
413 const SDLoc &DL, SelectionDAG &DAG,
414 const CCValAssign &VA,
415 MachineFrameInfo &MFI,
416 unsigned ArgIdx) const {
417 // Create the nodes corresponding to a load from this parameter slot.
418 ISD::ArgFlagsTy Flags = Ins[ArgIdx].Flags;
419 EVT ValVT;
420
421 // If value is passed by pointer we have address passed instead of the value
422 // itself.
424 ValVT = VA.getLocVT();
425 else
426 ValVT = VA.getValVT();
427
428 // Because we are dealing with BE architecture we need to offset loading of
429 // partial types
430 int Offset = VA.getLocMemOffset();
431 if (VA.getValVT() == MVT::i8) {
432 Offset += 3;
433 } else if (VA.getValVT() == MVT::i16) {
434 Offset += 2;
435 }
436
437 // TODO Interrupt handlers
438 // Calculate SP offset of interrupt parameter, re-arrange the slot normally
439 // taken by a return address.
440
441 // FIXME For now, all byval parameter objects are marked mutable. This can
442 // be changed with more analysis. In case of tail call optimization mark all
443 // arguments mutable. Since they could be overwritten by lowering of arguments
444 // in case of a tail call.
445 bool AlwaysUseMutable = shouldGuaranteeTCO(
446 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
447 bool IsImmutable = !AlwaysUseMutable && !Flags.isByVal();
448
449 if (Flags.isByVal()) {
450 unsigned Bytes = Flags.getByValSize();
451 if (Bytes == 0)
452 Bytes = 1; // Don't create zero-sized stack objects.
453 int FI = MFI.CreateFixedObject(Bytes, Offset, IsImmutable);
454 // TODO Interrupt handlers
455 // Adjust SP offset of interrupt parameter.
456 return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
457 } else {
458 int FI =
459 MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, Offset, IsImmutable);
460
461 // Set SExt or ZExt flag.
462 if (VA.getLocInfo() == CCValAssign::ZExt) {
463 MFI.setObjectZExt(FI, true);
464 } else if (VA.getLocInfo() == CCValAssign::SExt) {
465 MFI.setObjectSExt(FI, true);
466 }
467
468 // TODO Interrupt handlers
469 // Adjust SP offset of interrupt parameter.
470
471 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
472 SDValue Val = DAG.getLoad(
473 ValVT, DL, Chain, FIN,
475 return VA.isExtInLoc() ? DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val)
476 : Val;
477 }
478}
479
480SDValue M68kTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
481 SDValue Arg, const SDLoc &DL,
482 SelectionDAG &DAG,
483 const CCValAssign &VA,
484 ISD::ArgFlagsTy Flags) const {
485 unsigned LocMemOffset = VA.getLocMemOffset();
486 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, DL);
487 PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
488 StackPtr, PtrOff);
489 if (Flags.isByVal())
490 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, DL);
491
492 return DAG.getStore(
493 Chain, DL, Arg, PtrOff,
495}
496
497//===----------------------------------------------------------------------===//
498// Call
499//===----------------------------------------------------------------------===//
500
/// Lower an outgoing call into M68kISD::CALL / M68kISD::TC_RETURN nodes,
/// assigning operands to registers or stack slots per CC_M68k and handling
/// sibcalls, musttail calls, byval and inalloca arguments.
SDValue M68kTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                      SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool &IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;

  bool IsSibcall = false;
  // const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();

  if (CallConv == CallingConv::M68k_INTR)
    report_fatal_error("M68k interrupts may not be called directly");

  // A function-level attribute can veto tail calls entirely.
  auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
  if (Attr.getValueAsBool())
    IsTailCall = false;

  // FIXME Add tailcalls support

  bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
  if (IsMustTail) {
    // Force this to be a tail call. The verifier rules are enough to ensure
    // that we can lower this successfully without moving the return address
    // around.
    IsTailCall = true;
  } else if (IsTailCall) {
    // Check if it's really possible to do a tail call.
    IsTailCall = IsEligibleForTailCallOptimization(
        Callee, CallConv, IsVarArg, SR != NotStructReturn,
        MF.getFunction().hasStructRetAttr(), CLI.RetTy, Outs, OutVals, Ins,
        DAG);

    // Sibcalls are automatically detected tailcalls which do not require
    // ABI changes.
    if (!MF.getTarget().Options.GuaranteedTailCallOpt && IsTailCall)
      IsSibcall = true;

    if (IsTailCall)
      ++NumTailCalls;
  }

  assert(!(IsVarArg && canGuaranteeTCO(CallConv)) &&
         "Var args not supported with calling convention fastcc");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<Type *, 4> ArgTypes;
  for (const auto &Arg : CLI.getArgs())
    ArgTypes.emplace_back(Arg.Ty);
  M68kCCState CCInfo(ArgTypes, CallConv, IsVarArg, MF, ArgLocs,
                     *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_M68k);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
  if (IsSibcall) {
    // This is a sibcall. The memory operands are available in caller's
    // own caller's stack.
    NumBytes = 0;
  } else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
             canGuaranteeTCO(CallConv)) {
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
  }

  int FPDiff = 0;
  if (IsTailCall && !IsSibcall && !IsMustTail) {
    // Lower arguments at fp - stackoffset + fpdiff.
    unsigned NumBytesCallerPushed = MFI->getBytesToPopOnReturn();

    FPDiff = NumBytesCallerPushed - NumBytes;

    // Set the delta of movement of the returnaddr stackslot.
    // But only set if delta is greater than previous delta.
    if (FPDiff < MFI->getTCReturnAddrDelta())
      MFI->setTCReturnAddrDelta(FPDiff);
  }

  unsigned NumBytesToPush = NumBytes;
  unsigned NumBytesToPop = NumBytes;

  // If we have an inalloca argument, all stack space has already been allocated
  // for us and be right at the top of the stack. We don't support multiple
  // arguments passed in memory when using inalloca.
  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
    NumBytesToPush = 0;
    if (!ArgLocs.back().isMemLoc())
      report_fatal_error("cannot use inalloca attribute on a register "
                         "parameter");
    if (ArgLocs.back().getLocMemOffset() != 0)
      report_fatal_error("any parameter with the inalloca attribute must be "
                         "the only memory argument");
  }

  if (!IsSibcall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
                                 NumBytes - NumBytesToPush, DL);

  SDValue RetFI;
  // Load return address for tail calls.
  if (IsTailCall && FPDiff)
    Chain = EmitTailCallLoadRetAddr(DAG, RetFI, Chain, IsTailCall, FPDiff, DL);

  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization arguments are handle later.
  const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // Skip inalloca arguments, they have already been written.
    if (Flags.isInAlloca())
      continue;

    CCValAssign &VA = ArgLocs[i];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[i];
    bool IsByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
      break;
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
      break;
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
      break;
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
      break;
      Arg = DAG.getBitcast(RegVT, Arg);
      break;
      // Store the argument.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      Chain = DAG.getStore(
          Chain, DL, Arg, SpillSlot,
      Arg = SpillSlot;
      break;
    }
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (!IsSibcall && (!IsTailCall || IsByVal)) {
      assert(VA.isMemLoc());
      // Materialize the stack pointer lazily, only when a memory operand
      // actually needs it.
      if (!StackPtr.getNode()) {
        StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
      }
      MemOpChains.push_back(
          LowerMemOpCallTo(Chain, StackPtr, Arg, DL, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // FIXME Make sure PIC style GOT works as expected
  // The only time GOT is really needed is for Medium-PIC static data
  // otherwise we are happy with pc-rel or static references

  if (IsVarArg && IsMustTail) {
    // Re-forward the registers captured for musttail vararg thunks.
    const auto &Forwards = MFI->getForwardedMustTailRegParms();
    for (const auto &F : Forwards) {
      SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
      RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
    }
  }

  // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
  // don't need this because the eligibility check rejects calls that require
  // shuffling arguments passed in memory.
  if (!IsSibcall && IsTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);

    SmallVector<SDValue, 8> MemOpChains2;
    SDValue FIN;
    int FI = 0;
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      CCValAssign &VA = ArgLocs[i];
      if (VA.isRegLoc())
        continue;
      assert(VA.isMemLoc());
      SDValue Arg = OutVals[i];
      ISD::ArgFlagsTy Flags = Outs[i].Flags;
      // Skip inalloca arguments. They don't require any work.
      if (Flags.isInAlloca())
        continue;
      // Create frame index.
      int32_t Offset = VA.getLocMemOffset() + FPDiff;
      uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8;
      FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
      FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));

      if (Flags.isByVal()) {
        // Copy relative to framepointer.
        if (!StackPtr.getNode()) {
          StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
        }

        MemOpChains2.push_back(
            CreateCopyOfByValArgument(Source, FIN, ArgChain, Flags, DAG, DL));
      } else {
        // Store relative to framepointer.
        MemOpChains2.push_back(DAG.getStore(
            ArgChain, DL, Arg, FIN,
      }
    }

    if (!MemOpChains2.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains2);

    // Store the return address to the appropriate stack slot.
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetFI,
                                     Subtarget.getSlotSize(), FPDiff, DL);
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
    // it.
    GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);

    // We should use extra load for direct calls to dllimported functions in
    // non-JIT mode.
    const GlobalValue *GV = G->getGlobal();
    if (!GV->hasDLLImportStorageClass()) {
      unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);

          GV, DL, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);

      if (OpFlags == M68kII::MO_GOTPCREL) {

        // Add a wrapper.

        // Add extra indirection
        Callee = DAG.getLoad(
      }
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    unsigned char OpFlags =
        Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);

        S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  if (!IsSibcall && IsTailCall) {
    Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, 0, InFlag, DL);
    InFlag = Chain.getValue(1);
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall)
    Ops.push_back(DAG.getConstant(FPDiff, DL, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const uint32_t *Mask = RegInfo->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");

  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  if (IsTailCall) {
    return DAG.getNode(M68kISD::TC_RETURN, DL, NodeTys, Ops);
  }

  Chain = DAG.getNode(M68kISD::CALL, DL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPop;
  if (M68k::isCalleePop(CallConv, IsVarArg,
    NumBytesForCalleeToPop = NumBytes; // Callee pops everything
  } else if (!canGuaranteeTCO(CallConv) && SR == StackStructReturn) {
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    NumBytesForCalleeToPop = 4;
  } else {
    NumBytesForCalleeToPop = 0; // Callee pops nothing.
  }

  if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
    // No need to reset the stack after the call if the call doesn't return. To
    // make the MI verify, we'll pretend the callee does it for us.
    NumBytesForCalleeToPop = NumBytes;
  }

  // Returns a flag for retval copy to use.
  if (!IsSibcall) {
    Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
                               InFlag, DL);
    InFlag = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
                         InVals);
}
863
864SDValue M68kTargetLowering::LowerCallResult(
865 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
866 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
867 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
868
869 // Assign locations to each value returned by this call.
871 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
872 *DAG.getContext());
873 CCInfo.AnalyzeCallResult(Ins, RetCC_M68k);
874
875 // Copy all of the result registers out of their specified physreg.
876 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
877 CCValAssign &VA = RVLocs[i];
878 EVT CopyVT = VA.getLocVT();
879
880 /// ??? is this correct?
881 Chain = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), CopyVT, InFlag)
882 .getValue(1);
883 SDValue Val = Chain.getValue(0);
884
885 if (VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1)
886 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
887
888 InFlag = Chain.getValue(2);
889 InVals.push_back(Val);
890 }
891
892 return Chain;
893}
894
895//===----------------------------------------------------------------------===//
896// Formal Arguments Calling Convention Implementation
897//===----------------------------------------------------------------------===//
898
/// Lower incoming formal arguments: assign locations via CC_M68k, copy
/// register arguments out of their physregs, load stack arguments from fixed
/// frame objects, and record sret/vararg/callee-pop bookkeeping in the
/// function info.
SDValue M68kTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CCID, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // const TargetFrameLowering &TFL = *Subtarget.getFrameLowering();

  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<Type *, 4> ArgTypes;
  for (const Argument &Arg : MF.getFunction().args())
    ArgTypes.emplace_back(Arg.getType());
  M68kCCState CCInfo(ArgTypes, CCID, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_M68k);

  unsigned LastVal = ~0U;
  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    assert(VA.getValNo() != LastVal && "Same value in different locations");

    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      // Only i32 register arguments are handled here; other widths are
      // passed in memory or promoted by the calling convention.
      if (RegVT == MVT::i32)
        RC = &M68k::XR32RegClass;
      else
        llvm_unreachable("Unknown argument type!");

      Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
      ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt) {
        ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      } else if (VA.getLocInfo() == CCValAssign::ZExt) {
        ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      } else if (VA.getLocInfo() == CCValAssign::BCvt) {
        ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
      }

      if (VA.isExtInLoc()) {
        ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
      }
    } else {
      assert(VA.isMemLoc());
      ArgValue = LowerMemArgument(Chain, CCID, Ins, DL, DAG, VA, MFI, i);
    }

    // If value is passed via pointer - do a load.
    // TODO Make sure this handling on indirect arguments is correct
      ArgValue =
          DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, MachinePointerInfo());

    InVals.push_back(ArgValue);
  }

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    // Swift calling convention does not require we copy the sret argument
    // into %D0 for the return. We don't set SRetReturnReg for Swift.
    if (CCID == CallingConv::Swift)
      continue;

    // ABI require that for returning structs by value we copy the sret argument
    // into %D0 for the return. Save the argument into a virtual register so
    // that we can access it from the return points.
    if (Ins[i].Flags.isSRet()) {
      unsigned Reg = MMFI->getSRetReturnReg();
      if (!Reg) {
        MVT PtrTy = getPointerTy(DAG.getDataLayout());
        MMFI->setSRetReturnReg(Reg);
      }
      SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
      // At most one sret argument exists, so stop after the first.
      break;
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align stack specially for tail calls.
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start. We
  // can skip this if there are no va_start calls.
  if (MFI.hasVAStart()) {
    MMFI->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
  }

  if (IsVarArg && MFI.hasMustTailInVarArgFunc()) {
    // We forward some GPRs and some vector types.
    SmallVector<MVT, 2> RegParmTypes;
    MVT IntVT = MVT::i32;
    RegParmTypes.push_back(IntVT);

    // Compute the set of forwarded registers. The rest are scratch.
    // ??? what is this for?
    CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_M68k);

    // Copy all forwards from physical to virtual registers.
    for (ForwardedRegister &F : Forwards) {
      // FIXME Can we use a less constrained schedule?
      SDValue RegVal = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
      Chain = DAG.getCopyToReg(Chain, DL, F.VReg, RegVal);
    }
  }

  // Some CCs need callee pop.
  if (M68k::isCalleePop(CCID, IsVarArg,
    MMFI->setBytesToPopOnReturn(StackSize); // Callee pops everything.
  } else {
    MMFI->setBytesToPopOnReturn(0); // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
      MMFI->setBytesToPopOnReturn(4);
  }

  MMFI->setArgumentStackSize(StackSize);

  return Chain;
}
1037
1038//===----------------------------------------------------------------------===//
1039// Return Value Calling Convention Implementation
1040//===----------------------------------------------------------------------===//
1041
// Lower an IR 'ret' into M68kISD::RET: each return value is copied into its
// CC-assigned physical register, and a struct-return pointer (if recorded in
// SRetReturnReg) is additionally returned in %D0.
1042 SDValue
1043 M68kTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CCID,
1044                                 bool IsVarArg,
1046                                 const SmallVectorImpl<SDValue> &OutVals,
1047                                 const SDLoc &DL, SelectionDAG &DAG) const {
1050 
1052   CCState CCInfo(CCID, IsVarArg, MF, RVLocs, *DAG.getContext());
1053   CCInfo.AnalyzeReturn(Outs, RetCC_M68k);
1054 
  // 'Flag' glues each CopyToReg to the next so the register copies stay
  // adjacent to the final RET node.
1055   SDValue Flag;
1057   // Operand #0 = Chain (updated below)
1058   RetOps.push_back(Chain);
1059   // Operand #1 = Bytes To Pop
1060   RetOps.push_back(
1062 
1063   // Copy the result values into the output registers.
1064   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1065     CCValAssign &VA = RVLocs[i];
1066     assert(VA.isRegLoc() && "Can only return in registers!");
1067     SDValue ValToCopy = OutVals[i];
1068     EVT ValVT = ValToCopy.getValueType();
1069 
1070     // Promote values to the appropriate types.
1071     if (VA.getLocInfo() == CCValAssign::SExt)
1072       ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
1073     else if (VA.getLocInfo() == CCValAssign::ZExt)
1074       ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), ValToCopy);
1075     else if (VA.getLocInfo() == CCValAssign::AExt) {
      // i1 vector lanes are sign-extended so each lane becomes all-ones/zero.
1076       if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
1077         ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
1078       else
1079         ValToCopy = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), ValToCopy);
1080     } else if (VA.getLocInfo() == CCValAssign::BCvt)
1081       ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
1082 
1083     Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), ValToCopy, Flag);
1084     Flag = Chain.getValue(1);
1085     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1086   }
1087 
1088   // Swift calling convention does not require we copy the sret argument
1089   // into %d0 for the return, and SRetReturnReg is not set for Swift.
1090 
1091   // ABI require that for returning structs by value we copy the sret argument
1092   // into %D0 for the return. Save the argument into a virtual register so that
1093   // we can access it from the return points.
1094   //
1095   // Checking Function.hasStructRetAttr() here is insufficient because the IR
1096   // may not have an explicit sret argument. If MFI.CanLowerReturn is
1097   // false, then an sret argument may be implicitly inserted in the SelDAG. In
1098   // either case MFI->setSRetReturnReg() will have been called.
1099   if (unsigned SRetReg = MFI->getSRetReturnReg()) {
1100     // ??? Can i just move this to the top and escape this explanation?
1101     // When we have both sret and another return value, we should use the
1102     // original Chain stored in RetOps[0], instead of the current Chain updated
1103     // in the above loop. If we only have sret, RetOps[0] equals to Chain.
1104 
1105     // For the case of sret and another return value, we have
1106     //   Chain_0 at the function entry
1107     //   Chain_1 = getCopyToReg(Chain_0) in the above loop
1108     // If we use Chain_1 in getCopyFromReg, we will have
1109     //   Val = getCopyFromReg(Chain_1)
1110     //   Chain_2 = getCopyToReg(Chain_1, Val) from below
1111 
1112     // getCopyToReg(Chain_0) will be glued together with
1113     // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
1114     // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
1115     //   Data dependency from Unit B to Unit A due to usage of Val in
1116     //   getCopyToReg(Chain_1, Val)
1117     //   Chain dependency from Unit A to Unit B
1118 
1119     // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
1120     SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
1122 
1123     // ??? How will this work if CC does not use registers for args passing?
1124     // ??? What if I return multiple structs?
1125     unsigned RetValReg = M68k::D0;
1126     Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Flag);
1127     Flag = Chain.getValue(1);
1128 
1129     RetOps.push_back(
1130         DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
1131   }
1132 
1133   RetOps[0] = Chain; // Update chain.
1134 
1135   // Add the flag if we have it.
1136   if (Flag.getNode())
1137     RetOps.push_back(Flag);
1138 
1139   return DAG.getNode(M68kISD::RET, DL, MVT::Other, RetOps);
1140 }
1141
1142//===----------------------------------------------------------------------===//
1143// Fast Calling Convention (tail call) implementation
1144//===----------------------------------------------------------------------===//
1145
1146 // Like std call, callee cleans arguments, convention except that an address
1147 // register (%A0/%A1, see below) is reserved for storing the tail called
1148 // function address. Only 2 registers are free for argument passing (inreg).
1149 // Tail call optimization is performed provided:
1150 // * tailcallopt is enabled
1151 // * caller/callee are fastcc
1152 // With GOT-style position independent code only local (within module) calls
1153 // are supported at the moment. To keep the stack
1154 // aligned according to platform abi the function GetAlignedArgumentStackSize
1155 // ensures that argument delta is always multiples of stack alignment. (Dynamic
1156 // linkers need this - darwin's dyld for example) If a tail called function
1157 // callee has more arguments than the caller the caller needs to make sure that
1158 // there is room to move the RETADDR to. This is achieved by reserving an area
1159 // the size of the argument delta right after the original RETADDR, but before
1160 // the saved framepointer or the spilled registers e.g. caller(arg1, arg2)
1161 // calls callee(arg1, arg2,arg3,arg4) stack layout:
1162 //    arg1
1163 //    arg2
1164 //    RETADDR
1165 //    [ new RETADDR
1166 //      move area ]
1167 //    (possible frame pointer)
1168 //    callee-saved registers
1169 //    ...
1170 //    local1 ..
1171
1172/// Make the stack size align e.g 16n + 12 aligned for a 16-byte align
1173/// requirement.
1174unsigned
1175M68kTargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1176 SelectionDAG &DAG) const {
1177 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
1178 unsigned StackAlignment = TFI.getStackAlignment();
1179 uint64_t AlignMask = StackAlignment - 1;
1180 int64_t Offset = StackSize;
1181 unsigned SlotSize = Subtarget.getSlotSize();
1182 if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
1183 // Number smaller than 12 so just add the difference.
1184 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1185 } else {
1186 // Mask out lower bits, add stackalignment once plus the 12 bytes.
1187 Offset =
1188 ((~AlignMask) & Offset) + StackAlignment + (StackAlignment - SlotSize);
1189 }
1190 return Offset;
1191}
1192
1193/// Check whether the call is eligible for tail call optimization. Targets
1194/// that want to do tail call optimization should implement this function.
1195 bool M68kTargetLowering::IsEligibleForTailCallOptimization(
1196     SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
1197     bool IsCalleeStructRet, bool IsCallerStructRet, Type *RetTy,
1199     const SmallVectorImpl<SDValue> &OutVals,
1200     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  // First filter: the calling convention itself must permit tail calls.
1201   if (!mayTailCallThisCC(CalleeCC))
1202     return false;
1203 
1204   // If -tailcallopt is specified, make fastcc functions tail-callable.
1206   const auto &CallerF = MF.getFunction();
1207 
1208   CallingConv::ID CallerCC = CallerF.getCallingConv();
1209   bool CCMatch = CallerCC == CalleeCC;
1210 
1212     if (canGuaranteeTCO(CalleeCC) && CCMatch)
1213       return true;
1214     return false;
1215   }
1216 
1217   // Look for obvious safe cases to perform tail call optimization that do not
1218   // require ABI changes. This is what gcc calls sibcall.
1219 
1220   // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
1221   // emit a special epilogue.
1222   const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
1223   if (RegInfo->hasStackRealignment(MF))
1224     return false;
1225 
1226   // Also avoid sibcall optimization if either caller or callee uses struct
1227   // return semantics.
1228   if (IsCalleeStructRet || IsCallerStructRet)
1229     return false;
1230 
1231   // Do not sibcall optimize vararg calls unless all arguments are passed via
1232   // registers.
1233   LLVMContext &C = *DAG.getContext();
1234   if (IsVarArg && !Outs.empty()) {
1235 
1237     CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
1238 
1239     CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
1240     for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
1241       if (!ArgLocs[i].isRegLoc())
1242         return false;
1243   }
1244 
1245   // Check that the call results are passed in the same way.
1246   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, RetCC_M68k,
1247                                   RetCC_M68k))
1248     return false;
1249 
1250   // The callee has to preserve all registers the caller needs to preserve.
1251   const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
1252   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
1253   if (!CCMatch) {
1254     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
1255     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
1256       return false;
1257   }
1258 
  // Total bytes of stack-passed arguments; compared against the caller's
  // bytes-to-pop amount at the end of this function.
1259   unsigned StackArgsSize = 0;
1260 
1261   // If the callee takes no arguments then go on to check the results of the
1262   // call.
1263   if (!Outs.empty()) {
1264     // Check if stack adjustment is needed. For now, do not do this if any
1265     // argument is passed on the stack.
1267     CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
1268 
1269     CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
1270     StackArgsSize = CCInfo.getNextStackOffset();
1271 
1272     if (CCInfo.getNextStackOffset()) {
1273       // Check if the arguments are already laid out in the right way as
1274       // the caller's fixed stack objects.
1275       MachineFrameInfo &MFI = MF.getFrameInfo();
1276       const MachineRegisterInfo *MRI = &MF.getRegInfo();
1277       const M68kInstrInfo *TII = Subtarget.getInstrInfo();
1278       for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1279         CCValAssign &VA = ArgLocs[i];
1280         SDValue Arg = OutVals[i];
1281         ISD::ArgFlagsTy Flags = Outs[i].Flags;
1283           return false;
1284         if (!VA.isRegLoc()) {
1285           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
1286                                    TII, VA))
1287             return false;
1288         }
1289       }
1290     }
1291 
1292     bool PositionIndependent = isPositionIndependent();
1293     // If the tailcall address may be in a register, then make sure it's
1294     // possible to register allocate for it. The call address can
1295     // only target %A0 or %A1 since the tail call must be scheduled after
1296     // callee-saved registers are restored. These happen to be the same
1297     // registers used to pass 'inreg' arguments so watch out for those.
1298     if ((!isa<GlobalAddressSDNode>(Callee) &&
1299          !isa<ExternalSymbolSDNode>(Callee)) ||
1300         PositionIndependent) {
1301       unsigned NumInRegs = 0;
1302       // In PIC we need an extra register to formulate the address computation
1303       // for the callee.
1304       unsigned MaxInRegs = PositionIndependent ? 1 : 2;
1305 
1306       for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1307         CCValAssign &VA = ArgLocs[i];
1308         if (!VA.isRegLoc())
1309           continue;
1310         Register Reg = VA.getLocReg();
1311         switch (Reg) {
1312         default:
1313           break;
1314         case M68k::A0:
1315         case M68k::A1:
1316           if (++NumInRegs == MaxInRegs)
1317             return false;
1318           break;
1319         }
1320       }
1321     }
1322 
1323     const MachineRegisterInfo &MRI = MF.getRegInfo();
1324     if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
1325       return false;
1326   }
1327 
1328   bool CalleeWillPop = M68k::isCalleePop(
1329       CalleeCC, IsVarArg, MF.getTarget().Options.GuaranteedTailCallOpt);
1330 
1331   if (unsigned BytesToPop =
1333     // If we have bytes to pop, the callee must pop them.
1334     bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
1335     if (!CalleePopMatches)
1336       return false;
1337   } else if (CalleeWillPop && StackArgsSize > 0) {
1338     // If we don't have bytes to pop, make sure the callee doesn't pop any.
1339     return false;
1340   }
1341 
1342   return true;
1343 }
1344
1345//===----------------------------------------------------------------------===//
1346// Custom Lower
1347//===----------------------------------------------------------------------===//
1348
1350                                            SelectionDAG &DAG) const {
  // Dispatch each custom-lowered opcode to its dedicated lowering routine.
1351   switch (Op.getOpcode()) {
1352   default:
1353     llvm_unreachable("Should not custom lower this!");
1354   case ISD::SADDO:
1355   case ISD::UADDO:
1356   case ISD::SSUBO:
1357   case ISD::USUBO:
1358   case ISD::SMULO:
1359   case ISD::UMULO:
1360     return LowerXALUO(Op, DAG);
1361   case ISD::SETCC:
1362     return LowerSETCC(Op, DAG);
1363   case ISD::SETCCCARRY:
1364     return LowerSETCCCARRY(Op, DAG);
1365   case ISD::SELECT:
1366     return LowerSELECT(Op, DAG);
1367   case ISD::BRCOND:
1368     return LowerBRCOND(Op, DAG);
1369   case ISD::ADDC:
1370   case ISD::ADDE:
1371   case ISD::SUBC:
1372   case ISD::SUBE:
1373     return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
1374   case ISD::ConstantPool:
1375     return LowerConstantPool(Op, DAG);
1376   case ISD::GlobalAddress:
1377     return LowerGlobalAddress(Op, DAG);
1379     return LowerExternalSymbol(Op, DAG);
1380   case ISD::BlockAddress:
1381     return LowerBlockAddress(Op, DAG);
1382   case ISD::JumpTable:
1383     return LowerJumpTable(Op, DAG);
1384   case ISD::VASTART:
1385     return LowerVASTART(Op, DAG);
1387     return LowerDYNAMIC_STACKALLOC(Op, DAG);
1388   case ISD::SHL_PARTS:
1389     return LowerShiftLeftParts(Op, DAG);
  // The bool argument selects arithmetic (sign-preserving) vs logical shift.
1390   case ISD::SRA_PARTS:
1391     return LowerShiftRightParts(Op, DAG, true);
1392   case ISD::SRL_PARTS:
1393     return LowerShiftRightParts(Op, DAG, false);
1394   }
1395 }
1396
1397bool M68kTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
1398 SDValue C) const {
1399 // Shifts and add instructions in M68000 and M68010 support
1400 // up to 32 bits, but mul only has 16-bit variant. So it's almost
1401 // certainly beneficial to lower 8/16/32-bit mul to their
1402 // add / shifts counterparts. But for 64-bits mul, it might be
1403 // safer to just leave it to compiler runtime implementations.
1404 return VT.bitsLE(MVT::i32) || Subtarget.atLeastM68020();
1405}
1406
1407 SDValue M68kTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
1408   // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
1409   // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
1410   // looks for this combo and may remove the "setcc" instruction if the "setcc"
1411   // has only one use.
1412   SDNode *N = Op.getNode();
1413   SDValue LHS = N->getOperand(0);
1414   SDValue RHS = N->getOperand(1);
  // BaseOp is the CCR-setting M68k node; Cond the condition code that reads
  // the overflow/carry result out of it.
1415   unsigned BaseOp = 0;
1416   unsigned Cond = 0;
1417   SDLoc DL(Op);
1418   switch (Op.getOpcode()) {
1419   default:
1420     llvm_unreachable("Unknown ovf instruction!");
1421   case ISD::SADDO:
1422     BaseOp = M68kISD::ADD;
1424     break;
1425   case ISD::UADDO:
1426     BaseOp = M68kISD::ADD;
1428     break;
1429   case ISD::SSUBO:
1430     BaseOp = M68kISD::SUB;
1432     break;
1433   case ISD::USUBO:
1434     BaseOp = M68kISD::SUB;
1436     break;
1437   }
1438 
1439   // Also sets CCR.
  // The extra i8 result type models the CCR value consumed by SETCC below.
1440   SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i8);
1441   SDValue Arith = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
1442   SDValue SetCC = DAG.getNode(M68kISD::SETCC, DL, N->getValueType(1),
1443                               DAG.getConstant(Cond, DL, MVT::i8),
1444                               SDValue(Arith.getNode(), 1));
1445 
1446   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Arith, SetCC);
1447 }
1448
1449 /// Create a BTST (Bit Test) node - Test bit \p BitNo in \p Src and set
1450 /// condition according to equal/not-equal condition code \p CC.
1452                                  const SDLoc &DL, SelectionDAG &DAG) {
1453   // If Src is i8, promote it to i32 with any_extend. There is no i8 BTST
1454   // instruction. Since the shift amount is in-range-or-undefined, we know
1455   // that doing a bittest on the i32 value is ok.
1456   if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
1457     Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
1458 
1459   // If the operand types disagree, extend the shift amount to match. Since
1460   // BTST ignores high bits (like shifts) we can use anyextend.
1461   if (Src.getValueType() != BitNo.getValueType())
1462     BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
1463 
1464   SDValue BTST = DAG.getNode(M68kISD::BTST, DL, MVT::i32, Src, BitNo);
1465 
1466   // NOTE BTST sets CCR.Z flag
1468   return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
1469                      DAG.getConstant(Cond, DL, MVT::i8), BTST);
1470 }
1471
1472 /// Result of 'and' is compared against zero. Change to a BTST node if possible.
1474                               SelectionDAG &DAG) {
  // Peel truncates off both operands; whether the truncate discarded anything
  // relevant is re-checked below via known-bits.
1475   SDValue Op0 = And.getOperand(0);
1476   SDValue Op1 = And.getOperand(1);
1477   if (Op0.getOpcode() == ISD::TRUNCATE)
1478     Op0 = Op0.getOperand(0);
1479   if (Op1.getOpcode() == ISD::TRUNCATE)
1480     Op1 = Op1.getOperand(0);
1481 
1482   SDValue LHS, RHS;
  // Pattern 1: (and X, (shl 1, N)) -> test bit N of X.
1483   if (Op1.getOpcode() == ISD::SHL)
1484     std::swap(Op0, Op1);
1485   if (Op0.getOpcode() == ISD::SHL) {
1486     if (isOneConstant(Op0.getOperand(0))) {
1487       // If we looked past a truncate, check that it's only truncating away
1488       // known zeros.
1489       unsigned BitWidth = Op0.getValueSizeInBits();
1490       unsigned AndBitWidth = And.getValueSizeInBits();
1491       if (BitWidth > AndBitWidth) {
1492         auto Known = DAG.computeKnownBits(Op0);
1493         if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
1494           return SDValue();
1495       }
1496       LHS = Op1;
1497       RHS = Op0.getOperand(1);
1498     }
1499   } else if (auto *AndRHS = dyn_cast<ConstantSDNode>(Op1)) {
1500     uint64_t AndRHSVal = AndRHS->getZExtValue();
1501     SDValue AndLHS = Op0;
1502 
    // Pattern 2: (and (srl X, N), 1) -> test bit N of X.
1503     if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
1504       LHS = AndLHS.getOperand(0);
1505       RHS = AndLHS.getOperand(1);
1506     }
1507 
1508     // Use BTST if the immediate can't be encoded in a TEST instruction.
1509     if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
1510       LHS = AndLHS;
1511       RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), DL, LHS.getValueType());
1512     }
1513   }
1514 
1515   if (LHS.getNode())
1516     return getBitTestCondition(LHS, RHS, CC, DL, DAG);
1517 
1518   return SDValue();
1519 }
1520
  // One-to-one mapping from an integer ISD::CondCode to the M68k condition.
1522   switch (SetCCOpcode) {
1523   default:
1524     llvm_unreachable("Invalid integer condition!");
1525   case ISD::SETEQ:
1526     return M68k::COND_EQ;
1527   case ISD::SETGT:
1528     return M68k::COND_GT;
1529   case ISD::SETGE:
1530     return M68k::COND_GE;
1531   case ISD::SETLT:
1532     return M68k::COND_LT;
1533   case ISD::SETLE:
1534     return M68k::COND_LE;
1535   case ISD::SETNE:
1536     return M68k::COND_NE;
  // Unsigned orderings map onto the carry-based conditions.
1537   case ISD::SETULT:
1538     return M68k::COND_CS;
1539   case ISD::SETUGE:
1540     return M68k::COND_CC;
1541   case ISD::SETUGT:
1542     return M68k::COND_HI;
1543   case ISD::SETULE:
1544     return M68k::COND_LS;
1545   }
1546 }
1547
1548/// Do a one-to-one translation of a ISD::CondCode to the M68k-specific
1549/// condition code, returning the condition code and the LHS/RHS of the
1550/// comparison to make.
1551static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
1552 bool IsFP, SDValue &LHS, SDValue &RHS,
1553 SelectionDAG &DAG) {
1554 if (!IsFP) {
1555 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
1556 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
1557 // X > -1 -> X == 0, jump !sign.
1558 RHS = DAG.getConstant(0, DL, RHS.getValueType());
1559 return M68k::COND_PL;
1560 }
1561 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
1562 // X < 0 -> X == 0, jump on sign.
1563 return M68k::COND_MI;
1564 }
1565 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
1566 // X < 1 -> X <= 0
1567 RHS = DAG.getConstant(0, DL, RHS.getValueType());
1568 return M68k::COND_LE;
1569 }
1570 }
1571
1572 return TranslateIntegerM68kCC(SetCCOpcode);
1573 }
1574
1575 // First determine if it is required or is profitable to flip the operands.
1576
1577 // If LHS is a foldable load, but RHS is not, flip the condition.
1578 if (ISD::isNON_EXTLoad(LHS.getNode()) && !ISD::isNON_EXTLoad(RHS.getNode())) {
1579 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
1580 std::swap(LHS, RHS);
1581 }
1582
1583 switch (SetCCOpcode) {
1584 default:
1585 break;
1586 case ISD::SETOLT:
1587 case ISD::SETOLE:
1588 case ISD::SETUGT:
1589 case ISD::SETUGE:
1590 std::swap(LHS, RHS);
1591 break;
1592 }
1593
1594 // On a floating point condition, the flags are set as follows:
1595 // ZF PF CF op
1596 // 0 | 0 | 0 | X > Y
1597 // 0 | 0 | 1 | X < Y
1598 // 1 | 0 | 0 | X == Y
1599 // 1 | 1 | 1 | unordered
1600 switch (SetCCOpcode) {
1601 default:
1602 llvm_unreachable("Condcode should be pre-legalized away");
1603 case ISD::SETUEQ:
1604 case ISD::SETEQ:
1605 return M68k::COND_EQ;
1606 case ISD::SETOLT: // flipped
1607 case ISD::SETOGT:
1608 case ISD::SETGT:
1609 return M68k::COND_HI;
1610 case ISD::SETOLE: // flipped
1611 case ISD::SETOGE:
1612 case ISD::SETGE:
1613 return M68k::COND_CC;
1614 case ISD::SETUGT: // flipped
1615 case ISD::SETULT:
1616 case ISD::SETLT:
1617 return M68k::COND_CS;
1618 case ISD::SETUGE: // flipped
1619 case ISD::SETULE:
1620 case ISD::SETLE:
1621 return M68k::COND_LS;
1622 case ISD::SETONE:
1623 case ISD::SETNE:
1624 return M68k::COND_NE;
1625 case ISD::SETOEQ:
1626 case ISD::SETUNE:
1627 return M68k::COND_INVALID;
1628 }
1629}
1630
1631 // Convert (truncate (srl X, N) to i1) to (bt X, N)
1633                                      const SDLoc &DL, SelectionDAG &DAG) {
1634 
1635   assert(Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 &&
1636          "Expected TRUNCATE to i1 node");
1637 
  // Only the (srl X, N) form is recognized; bail out on anything else.
1638   if (Op.getOperand(0).getOpcode() != ISD::SRL)
1639     return SDValue();
1640 
1641   SDValue ShiftRight = Op.getOperand(0);
1642   return getBitTestCondition(ShiftRight.getOperand(0), ShiftRight.getOperand(1),
1643                              CC, DL, DAG);
1644 }
1645
1646/// \brief return true if \c Op has a use that doesn't just read flags.
1647static bool hasNonFlagsUse(SDValue Op) {
1648 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
1649 ++UI) {
1650 SDNode *User = *UI;
1651 unsigned UOpNo = UI.getOperandNo();
1652 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
1653 // Look pass truncate.
1654 UOpNo = User->use_begin().getOperandNo();
1655 User = *User->use_begin();
1656 }
1657
1658 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
1659 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
1660 return true;
1661 }
1662 return false;
1663}
1664
// Produce a CCR value for "compare Op against zero", reusing the CCR result
// of the defining arithmetic node when that is safe for the condition M68kCC,
// and falling back to an explicit CMP-with-0 (TEST pattern) otherwise.
1665 SDValue M68kTargetLowering::EmitTest(SDValue Op, unsigned M68kCC,
1666                                      const SDLoc &DL, SelectionDAG &DAG) const {
1667 
1668   // CF and OF aren't always set the way we want. Determine which
1669   // of these we need.
1670   bool NeedCF = false;
1671   bool NeedOF = false;
1672   switch (M68kCC) {
1673   default:
1674     break;
1675   case M68k::COND_HI:
1676   case M68k::COND_CC:
1677   case M68k::COND_CS:
1678   case M68k::COND_LS:
1679     NeedCF = true;
1680     break;
1681   case M68k::COND_GT:
1682   case M68k::COND_GE:
1683   case M68k::COND_LT:
1684   case M68k::COND_LE:
1685   case M68k::COND_VS:
1686   case M68k::COND_VC: {
1687     // Check if we really need to set the
1688     // Overflow flag. If NoSignedWrap is present
1689     // that is not actually needed.
1690     switch (Op->getOpcode()) {
1691     case ISD::ADD:
1692     case ISD::SUB:
1693     case ISD::MUL:
1694     case ISD::SHL: {
1695       if (Op.getNode()->getFlags().hasNoSignedWrap())
1696         break;
1697       [[fallthrough]];
1698     }
1699     default:
1700       NeedOF = true;
1701       break;
1702     }
1703     break;
1704   }
1705   }
1706   // See if we can use the CCR value from the operand instead of
1707   // doing a separate TEST. TEST always sets OF and CF to 0, so unless
1708   // we prove that the arithmetic won't overflow, we can't use OF or CF.
1709   if (Op.getResNo() != 0 || NeedOF || NeedCF) {
1710     // Emit a CMP with 0, which is the TEST pattern.
1711     return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
1712                        DAG.getConstant(0, DL, Op.getValueType()), Op);
1713   }
  // Opcode/NumOperands describe the CCR-setting M68k node to build; they stay
  // zero when no profitable replacement is found (see the final CMP fallback).
1714   unsigned Opcode = 0;
1715   unsigned NumOperands = 0;
1716 
1717   // Truncate operations may prevent the merge of the SETCC instruction
1718   // and the arithmetic instruction before it. Attempt to truncate the operands
1719   // of the arithmetic instruction and use a reduced bit-width instruction.
1720   bool NeedTruncation = false;
1721   SDValue ArithOp = Op;
1722   if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
1723     SDValue Arith = Op->getOperand(0);
1724     // Both the trunc and the arithmetic op need to have one user each.
1725     if (Arith->hasOneUse())
1726       switch (Arith.getOpcode()) {
1727       default:
1728         break;
1729       case ISD::ADD:
1730       case ISD::SUB:
1731       case ISD::AND:
1732       case ISD::OR:
1733       case ISD::XOR: {
1734         NeedTruncation = true;
1735         ArithOp = Arith;
1736       }
1737       }
1738   }
1739 
1740   // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
1741   // which may be the result of a CAST. We use the variable 'Op', which is the
1742   // non-casted variable when we check for possible users.
1743   switch (ArithOp.getOpcode()) {
1744   case ISD::ADD:
1745     Opcode = M68kISD::ADD;
1746     NumOperands = 2;
1747     break;
1748   case ISD::SHL:
1749   case ISD::SRL:
1750     // If we have a constant logical shift that's only used in a comparison
1751     // against zero turn it into an equivalent AND. This allows turning it into
1752     // a TEST instruction later.
1753     if ((M68kCC == M68k::COND_EQ || M68kCC == M68k::COND_NE) &&
1754         Op->hasOneUse() && isa<ConstantSDNode>(Op->getOperand(1)) &&
1755         !hasNonFlagsUse(Op)) {
1756       EVT VT = Op.getValueType();
1757       unsigned BitWidth = VT.getSizeInBits();
1758       unsigned ShAmt = Op->getConstantOperandVal(1);
1759       if (ShAmt >= BitWidth) // Avoid undefined shifts.
1760         break;
1761       APInt Mask = ArithOp.getOpcode() == ISD::SRL
1763                        : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
1764       if (!Mask.isSignedIntN(32)) // Avoid large immediates.
1765         break;
1766       Op = DAG.getNode(ISD::AND, DL, VT, Op->getOperand(0),
1767                        DAG.getConstant(Mask, DL, VT));
1768     }
1769     break;
1770 
1771   case ISD::AND:
1772     // If the primary 'and' result isn't used, don't bother using
1773     // M68kISD::AND, because a TEST instruction will be better.
1774     if (!hasNonFlagsUse(Op)) {
1775       SDValue Op0 = ArithOp->getOperand(0);
1776       SDValue Op1 = ArithOp->getOperand(1);
1777       EVT VT = ArithOp.getValueType();
1778       bool IsAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
1779       bool IsLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
1780 
1781       // But if we can combine this into an ANDN operation, then create an AND
1782       // now and allow it to be pattern matched into an ANDN.
1783       if (/*!Subtarget.hasBMI() ||*/ !IsAndn || !IsLegalAndnType)
1784         break;
1785     }
1786     [[fallthrough]];
1787   case ISD::SUB:
1788   case ISD::OR:
1789   case ISD::XOR:
1790     // Due to the ISEL shortcoming noted above, be conservative if this op is
1791     // likely to be selected as part of a load-modify-store instruction.
1792     for (const auto *U : Op.getNode()->uses())
1793       if (U->getOpcode() == ISD::STORE)
1794         goto default_case;
1795 
1796     // Otherwise use a regular CCR-setting instruction.
1797     switch (ArithOp.getOpcode()) {
1798     default:
1799       llvm_unreachable("unexpected operator!");
1800     case ISD::SUB:
1801       Opcode = M68kISD::SUB;
1802       break;
1803     case ISD::XOR:
1804       Opcode = M68kISD::XOR;
1805       break;
1806     case ISD::AND:
1807       Opcode = M68kISD::AND;
1808       break;
1809     case ISD::OR:
1810       Opcode = M68kISD::OR;
1811       break;
1812     }
1813 
1814     NumOperands = 2;
1815     break;
1816   case M68kISD::ADD:
1817   case M68kISD::SUB:
1818   case M68kISD::OR:
1819   case M68kISD::XOR:
1820   case M68kISD::AND:
    // Already a CCR-producing M68k node: its second result is the CCR value.
1821     return SDValue(Op.getNode(), 1);
1822   default:
1823   default_case:
1824     break;
1825   }
1826 
1827   // If we found that truncation is beneficial, perform the truncation and
1828   // update 'Op'.
1829   if (NeedTruncation) {
1830     EVT VT = Op.getValueType();
1831     SDValue WideVal = Op->getOperand(0);
1832     EVT WideVT = WideVal.getValueType();
1833     unsigned ConvertedOp = 0;
1834     // Use a target machine opcode to prevent further DAGCombine
1835     // optimizations that may separate the arithmetic operations
1836     // from the setcc node.
1837     switch (WideVal.getOpcode()) {
1838     default:
1839       break;
1840     case ISD::ADD:
1841       ConvertedOp = M68kISD::ADD;
1842       break;
1843     case ISD::SUB:
1844       ConvertedOp = M68kISD::SUB;
1845       break;
1846     case ISD::AND:
1847       ConvertedOp = M68kISD::AND;
1848       break;
1849     case ISD::OR:
1850       ConvertedOp = M68kISD::OR;
1851       break;
1852     case ISD::XOR:
1853       ConvertedOp = M68kISD::XOR;
1854       break;
1855     }
1856 
1857     if (ConvertedOp) {
1858       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1859       if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
1860         SDValue V0 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(0));
1861         SDValue V1 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(1));
1862         Op = DAG.getNode(ConvertedOp, DL, VT, V0, V1);
1863       }
1864     }
1865   }
1866 
1867   if (Opcode == 0) {
1868     // Emit a CMP with 0, which is the TEST pattern.
1869     return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
1870                        DAG.getConstant(0, DL, Op.getValueType()), Op);
1871   }
1872   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i8);
1873   SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
1874 
1875   SDValue New = DAG.getNode(Opcode, DL, VTs, Ops);
1876   DAG.ReplaceAllUsesWith(Op, New);
1877   return SDValue(New.getNode(), 1);
1878 }
1879
1880/// \brief Return true if the condition is an unsigned comparison operation.
1881static bool isM68kCCUnsigned(unsigned M68kCC) {
1882 switch (M68kCC) {
1883 default:
1884 llvm_unreachable("Invalid integer condition!");
1885 case M68k::COND_EQ:
1886 case M68k::COND_NE:
1887 case M68k::COND_CS:
1888 case M68k::COND_HI:
1889 case M68k::COND_LS:
1890 case M68k::COND_CC:
1891 return true;
1892 case M68k::COND_GT:
1893 case M68k::COND_GE:
1894 case M68k::COND_LT:
1895 case M68k::COND_LE:
1896 return false;
1897 }
1898}
1899
// Produce a CCR value for "compare Op0 against Op1" with condition M68kCC.
1900 SDValue M68kTargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned M68kCC,
1901                                     const SDLoc &DL, SelectionDAG &DAG) const {
  // Comparison against zero reduces to the TEST pattern.
1902   if (isNullConstant(Op1))
1903     return EmitTest(Op0, M68kCC, DL, DAG);
1904 
1905   assert(!(isa<ConstantSDNode>(Op1) && Op0.getValueType() == MVT::i1) &&
1906          "Unexpected comparison operation for MVT::i1 operands");
1907 
1908   if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
1909        Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
1910     // Only promote the compare up to I32 if it is a 16 bit operation
1911     // with an immediate. 16 bit immediates are to be avoided.
1912     if ((Op0.getValueType() == MVT::i16 &&
1913          (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
1915       unsigned ExtendOp =
1917       Op0 = DAG.getNode(ExtendOp, DL, MVT::i32, Op0);
1918       Op1 = DAG.getNode(ExtendOp, DL, MVT::i32, Op1);
1919     }
1920     // Use SUB instead of CMP to enable CSE between SUB and CMP.
1921     SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i8);
1922     SDValue Sub = DAG.getNode(M68kISD::SUB, DL, VTs, Op0, Op1);
    // Result #1 of the SUB node is the CCR value.
1923     return SDValue(Sub.getNode(), 1);
1924   }
1925   return DAG.getNode(M68kISD::CMP, DL, MVT::i8, Op0, Op1);
1926 }
1927
1928/// Result of 'and' or 'trunc to i1' is compared against zero.
1929/// Change to a BTST node if possible.
1930SDValue M68kTargetLowering::LowerToBTST(SDValue Op, ISD::CondCode CC,
1931 const SDLoc &DL,
1932 SelectionDAG &DAG) const {
1933 if (Op.getOpcode() == ISD::AND)
1934 return LowerAndToBTST(Op, CC, DL, DAG);
1935 if (Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1)
1936 return LowerTruncateToBTST(Op, CC, DL, DAG);
1937 return SDValue();
1938}
1939
1940 SDValue M68kTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
1941   MVT VT = Op.getSimpleValueType();
1942   assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
1943 
1944   SDValue Op0 = Op.getOperand(0);
1945   SDValue Op1 = Op.getOperand(1);
1946   SDLoc DL(Op);
1947   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
1948 
1949   // Optimize to BTST if possible.
1950   // Lower (X & (1 << N)) == 0 to BTST(X, N).
1951   // Lower ((X >>u N) & 1) != 0 to BTST(X, N).
1952   // Lower ((X >>s N) & 1) != 0 to BTST(X, N).
1953   // Lower (trunc (X >> N) to i1) to BTST(X, N).
1954   if (Op0.hasOneUse() && isNullConstant(Op1) &&
1955       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
1956     if (SDValue NewSetCC = LowerToBTST(Op0, CC, DL, DAG)) {
1957       if (VT == MVT::i1)
1958         return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, NewSetCC);
1959       return NewSetCC;
1960     }
1961   }
1962 
1963   // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
1964   // these.
1965   if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
1966       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
1967 
1968     // If the input is a setcc, then reuse the input setcc or use a new one with
1969     // the inverted condition.
1970     if (Op0.getOpcode() == M68kISD::SETCC) {
1972       bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
      // (setcc X) == 1 / != 0 is the setcc itself; reuse it directly.
1973       if (!Invert)
1974         return Op0;
1975 
1976       CCode = M68k::GetOppositeBranchCondition(CCode);
1977       SDValue SetCC =
1979                       DAG.getConstant(CCode, DL, MVT::i8), Op0.getOperand(1));
1980       if (VT == MVT::i1)
1981         return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
1982       return SetCC;
1983     }
1984   }
  // i1 comparisons against constants reduce to comparisons against zero.
1985   if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
1986     if (isOneConstant(Op1)) {
1988       return DAG.getSetCC(DL, VT, Op0, DAG.getConstant(0, DL, MVT::i1), NewCC);
1989     }
1990     if (!isNullConstant(Op1)) {
1991       SDValue Xor = DAG.getNode(ISD::XOR, DL, MVT::i1, Op0, Op1);
1992       return DAG.getSetCC(DL, VT, Xor, DAG.getConstant(0, DL, MVT::i1), CC);
1993     }
1994   }
1995 
1996   bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
1997   unsigned M68kCC = TranslateM68kCC(CC, DL, IsFP, Op0, Op1, DAG);
1998   if (M68kCC == M68k::COND_INVALID)
1999     return SDValue();
2000 
2001   SDValue CCR = EmitCmp(Op0, Op1, M68kCC, DL, DAG);
2002   return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
2003                      DAG.getConstant(M68kCC, DL, MVT::i8), CCR);
2004 }
2005
2006SDValue M68kTargetLowering::LowerSETCCCARRY(SDValue Op,
2007 SelectionDAG &DAG) const {
2008 SDValue LHS = Op.getOperand(0);
2009 SDValue RHS = Op.getOperand(1);
2010 SDValue Carry = Op.getOperand(2);
2011 SDValue Cond = Op.getOperand(3);
2012 SDLoc DL(Op);
2013
2014 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
2015 M68k::CondCode CC = TranslateIntegerM68kCC(cast<CondCodeSDNode>(Cond)->get());
2016
2017 EVT CarryVT = Carry.getValueType();
2018 APInt NegOne = APInt::getAllOnes(CarryVT.getScalarSizeInBits());
2019 Carry = DAG.getNode(M68kISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32), Carry,
2020 DAG.getConstant(NegOne, DL, CarryVT));
2021
2022 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
2023 SDValue Cmp =
2024 DAG.getNode(M68kISD::SUBX, DL, VTs, LHS, RHS, Carry.getValue(1));
2025
2026 return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
2027 DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
2028}
2029
2030/// Return true if opcode is a M68k logical comparison.
2031static bool isM68kLogicalCmp(SDValue Op) {
2032 unsigned Opc = Op.getNode()->getOpcode();
2033 if (Opc == M68kISD::CMP)
2034 return true;
2035 if (Op.getResNo() == 1 &&
2036 (Opc == M68kISD::ADD || Opc == M68kISD::SUB || Opc == M68kISD::ADDX ||
2037 Opc == M68kISD::SUBX || Opc == M68kISD::SMUL || Opc == M68kISD::UMUL ||
2038 Opc == M68kISD::OR || Opc == M68kISD::XOR || Opc == M68kISD::AND))
2039 return true;
2040
2041 if (Op.getResNo() == 2 && Opc == M68kISD::UMUL)
2042 return true;
2043
2044 return false;
2045}
2046
  // Only a truncate can be looked through here.
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue VOp0 = V.getOperand(0);
  // If every bit the truncate discards is already known to be zero, the
  // truncate is a value-preserving no-op and the wider operand can be used
  // directly in its place.
  unsigned InBits = VOp0.getValueSizeInBits();
  unsigned Bits = V.getValueSizeInBits();
  return DAG.MaskedValueIsZero(VOp0,
                               APInt::getHighBitsSet(InBits, InBits - Bits));
}
2057
/// Lower ISD::SELECT into an M68kISD::CMOV node, first trying to fold the
/// condition computation into the CCR-producing operand.
SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  // Whether an explicit test is still needed to materialize CCR.
  bool addTest = true;
  SDValue Cond = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  SDValue CC;

  // Re-lower a generic SETCC condition into the M68k-specific form first.
  if (Cond.getOpcode() == ISD::SETCC) {
    if (SDValue NewCond = LowerSETCC(Cond, DAG))
      Cond = NewCond;
  }

  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
  if (Cond.getOpcode() == M68kISD::SETCC &&
      Cond.getOperand(1).getOpcode() == M68kISD::CMP &&
      isNullConstant(Cond.getOperand(1).getOperand(0))) {
    SDValue Cmp = Cond.getOperand(1);

    unsigned CondCode =
        cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();

    if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (CondCode == M68k::COND_EQ || CondCode == M68k::COND_NE)) {
      // Y is whichever select arm is not the all-ones constant.
      SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;

      SDValue CmpOp0 = Cmp.getOperand(1);
      // Apply further optimizations for special cases
      // (select (x != 0), -1, 0) -> neg & sbb
      // (select (x == 0), 0, -1) -> neg & sbb
      if (isNullConstant(Y) &&
          (isAllOnesConstant(Op1) == (CondCode == M68k::COND_NE))) {

        SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);

        // 0 - x sets the carry flag iff x != 0.
        SDValue Neg =
            DAG.getNode(M68kISD::SUB, DL, VTs,
                        DAG.getConstant(0, DL, CmpOp0.getValueType()), CmpOp0);

        SDValue Res = DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                                  SDValue(Neg.getNode(), 1));
        return Res;
      }

          DAG.getConstant(1, DL, CmpOp0.getValueType()), CmpOp0);

      SDValue Res = // Res = 0 or -1.
          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                      DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cmp);

      // Invert the mask when the all-ones arm is on the opposite side.
      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_EQ))
        Res = DAG.getNOT(DL, Res, Res.getValueType());

      // Fold in the non-constant arm (a no-op when it is zero).
      if (!isNullConstant(Op2))
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
      return Res;
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a M68kISD::CMP, then use it as the condition
  // setting operand in place of the M68kISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();

    bool IllegalFPCMov = false;

    if ((isM68kLogicalCmp(Cmp) && !IllegalFPCMov) || Opc == M68kISD::BTST) {
      Cond = Cmp;
      addTest = false;
    }
  } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
             CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
             CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
    // The condition is itself an overflow-producing arithmetic node; lower
    // it to the matching M68k node and select the appropriate CCR flag.
    SDValue LHS = Cond.getOperand(0);
    SDValue RHS = Cond.getOperand(1);
    unsigned MxOpcode;
    unsigned MxCond;
    SDVTList VTs;
    switch (CondOpcode) {
    case ISD::UADDO:
      MxOpcode = M68kISD::ADD;
      MxCond = M68k::COND_CS;
      break;
    case ISD::SADDO:
      MxOpcode = M68kISD::ADD;
      MxCond = M68k::COND_VS;
      break;
    case ISD::USUBO:
      MxOpcode = M68kISD::SUB;
      MxCond = M68k::COND_CS;
      break;
    case ISD::SSUBO:
      MxOpcode = M68kISD::SUB;
      MxCond = M68k::COND_VS;
      break;
    case ISD::UMULO:
      MxOpcode = M68kISD::UMUL;
      MxCond = M68k::COND_VS;
      break;
    case ISD::SMULO:
      MxOpcode = M68kISD::SMUL;
      MxCond = M68k::COND_VS;
      break;
    default:
      llvm_unreachable("unexpected overflowing operator");
    }
    // UMUL carries an extra value result, so its flag result index differs.
    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);

    SDValue MxOp = DAG.getNode(MxOpcode, DL, VTs, LHS, RHS);

    if (CondOpcode == ISD::UMULO)
      Cond = MxOp.getValue(2);
    else
      Cond = MxOp.getValue(1);

    CC = DAG.getConstant(MxCond, DL, MVT::i8);
    addTest = false;
  }

  if (addTest) {
    // Look past the truncate if the high bits are known zero.
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    Cond = EmitTest(Cond, M68k::COND_NE, DL, DAG);
  }

  // a < b ? -1 : 0 -> RES = ~setcc_carry
  // a < b ? 0 : -1 -> RES = setcc_carry
  // a >= b ? -1 : 0 -> RES = setcc_carry
  // a >= b ? 0 : -1 -> RES = ~setcc_carry
  if (Cond.getOpcode() == M68kISD::SUB) {
    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();

    if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) &&
        (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (isNullConstant(Op1) || isNullConstant(Op2))) {
      SDValue Res =
          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_CS))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }

  // M68k doesn't have an i8 cmov. If both operands are the result of a
  // truncate widen the cmov and push the truncate through. This avoids
  // introducing a new branch during isel and doesn't add any extensions.
  if (Op.getValueType() == MVT::i8 && Op1.getOpcode() == ISD::TRUNCATE &&
      Op2.getOpcode() == ISD::TRUNCATE) {
    SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
    if (T1.getValueType() == T2.getValueType() &&
        // Block CopyFromReg so partial register stalls are avoided.
        T1.getOpcode() != ISD::CopyFromReg &&
        T2.getOpcode() != ISD::CopyFromReg) {
      SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
      SDValue Cmov = DAG.getNode(M68kISD::CMOV, DL, VTs, T2, T1, CC, Cond);
      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
    }
  }

  // M68kISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {Op2, Op1, CC, Cond};
  return DAG.getNode(M68kISD::CMOV, DL, VTs, Ops);
}
2257
2258/// Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes
2259/// each of which has no other use apart from the AND / OR.
2260static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
2261 Opc = Op.getOpcode();
2262 if (Opc != ISD::OR && Opc != ISD::AND)
2263 return false;
2264 return (M68k::IsSETCC(Op.getOperand(0).getOpcode()) &&
2265 Op.getOperand(0).hasOneUse() &&
2266 M68k::IsSETCC(Op.getOperand(1).getOpcode()) &&
2267 Op.getOperand(1).hasOneUse());
2268}
2269
2270/// Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the
2271/// SETCC node has a single use.
2272static bool isXor1OfSetCC(SDValue Op) {
2273 if (Op.getOpcode() != ISD::XOR)
2274 return false;
2275 if (isOneConstant(Op.getOperand(1)))
2276 return Op.getOperand(0).getOpcode() == M68kISD::SETCC &&
2277 Op.getOperand(0).hasOneUse();
2278 return false;
2279}
2280
/// Lower ISD::BRCOND into an M68kISD::BRCOND, folding flag-producing
/// operations into the branch condition where possible.
SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  // Whether an explicit test is still needed to materialize CCR.
  bool AddTest = true;
  SDValue Chain = Op.getOperand(0);
  SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  SDLoc DL(Op);
  SDValue CC;
  bool Inverted = false;

  if (Cond.getOpcode() == ISD::SETCC) {
    // Check for setcc([su]{add,sub}o == 0).
    if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
        isNullConstant(Cond.getOperand(1)) &&
        Cond.getOperand(0).getResNo() == 1 &&
        (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
         Cond.getOperand(0).getOpcode() == ISD::UADDO ||
         Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
         Cond.getOperand(0).getOpcode() == ISD::USUBO)) {
      // Branching on the *absence* of overflow: remember to invert later.
      Inverted = true;
      Cond = Cond.getOperand(0);
    } else {
      if (SDValue NewCond = LowerSETCC(Cond, DAG))
        Cond = NewCond;
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a M68kISD::CMP, then use it as the condition
  // setting operand in place of the M68kISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();

    if (isM68kLogicalCmp(Cmp) || Opc == M68kISD::BTST) {
      Cond = Cmp;
      AddTest = false;
    } else {
      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
      default:
        break;
      case M68k::COND_VS:
      case M68k::COND_CS:
        // These can only come from an arithmetic instruction with overflow,
        // e.g. SADDO, UADDO.
        Cond = Cond.getNode()->getOperand(1);
        AddTest = false;
        break;
      }
    }
  }
  CondOpcode = Cond.getOpcode();
  if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
      CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO) {
    SDValue LHS = Cond.getOperand(0);
    SDValue RHS = Cond.getOperand(1);
    unsigned MxOpcode;
    unsigned MxCond;
    SDVTList VTs;
    // Keep this in sync with LowerXALUO, otherwise we might create redundant
    // instructions that can't be removed afterwards (i.e. M68kISD::ADD and
    // M68kISD::INC).
    switch (CondOpcode) {
    case ISD::UADDO:
      MxOpcode = M68kISD::ADD;
      MxCond = M68k::COND_CS;
      break;
    case ISD::SADDO:
      MxOpcode = M68kISD::ADD;
      MxCond = M68k::COND_VS;
      break;
    case ISD::USUBO:
      MxOpcode = M68kISD::SUB;
      MxCond = M68k::COND_CS;
      break;
    case ISD::SSUBO:
      MxOpcode = M68kISD::SUB;
      MxCond = M68k::COND_VS;
      break;
    case ISD::UMULO:
      MxOpcode = M68kISD::UMUL;
      MxCond = M68k::COND_VS;
      break;
    case ISD::SMULO:
      MxOpcode = M68kISD::SMUL;
      MxCond = M68k::COND_VS;
      break;
    default:
      llvm_unreachable("unexpected overflowing operator");
    }

    if (Inverted)

    // UMUL carries an extra value result, so its flag result index differs.
    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), MVT::i8);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i8);

    SDValue MxOp = DAG.getNode(MxOpcode, DL, VTs, LHS, RHS);

    if (CondOpcode == ISD::UMULO)
      Cond = MxOp.getValue(2);
    else
      Cond = MxOp.getValue(1);

    CC = DAG.getConstant(MxCond, DL, MVT::i8);
    AddTest = false;
  } else {
    unsigned CondOpc;
    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
      SDValue Cmp = Cond.getOperand(0).getOperand(1);
      if (CondOpc == ISD::OR) {
        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
        // two branches instead of an explicit OR instruction with a
        // separate test.
        if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp)) {
          CC = Cond.getOperand(0).getOperand(0);
          Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
                              Dest, CC, Cmp);
          CC = Cond.getOperand(1).getOperand(0);
          Cond = Cmp;
          AddTest = false;
        }
      } else { // ISD::AND
        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
        // two branches instead of an explicit AND instruction with a
        // separate test. However, we only do this if this block doesn't
        // have a fall-through edge, because this requires an explicit
        // jmp when the condition is false.
        if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp) &&
            Op.getNode()->hasOneUse()) {
          M68k::CondCode CCode =
              (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
          CCode = M68k::GetOppositeBranchCondition(CCode);
          CC = DAG.getConstant(CCode, DL, MVT::i8);
          SDNode *User = *Op.getNode()->use_begin();
          // Look for an unconditional branch following this conditional branch.
          // We need this because we need to reverse the successors in order
          // to implement FCMP_OEQ.
          if (User->getOpcode() == ISD::BR) {
            SDValue FalseBB = User->getOperand(1);
            SDNode *NewBR =
                DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
            assert(NewBR == User);
            (void)NewBR;
            Dest = FalseBB;

            Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
                                Dest, CC, Cmp);
            M68k::CondCode CCode =
                (M68k::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
            CCode = M68k::GetOppositeBranchCondition(CCode);
            CC = DAG.getConstant(CCode, DL, MVT::i8);
            Cond = Cmp;
            AddTest = false;
          }
        }
      }
    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
      // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
      // It should be transformed during dag combiner except when the condition
      // is set by a arithmetics with overflow node.
      M68k::CondCode CCode =
          (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
      CCode = M68k::GetOppositeBranchCondition(CCode);
      CC = DAG.getConstant(CCode, DL, MVT::i8);
      Cond = Cond.getOperand(0).getOperand(1);
      AddTest = false;
    }
  }

  if (AddTest) {
    // Look past the truncate if the high bits are known zero.
      Cond = Cond.getOperand(0);

    // We know the result is compared against zero. Try to match it to BT.
    if (Cond.hasOneUse()) {
      if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        AddTest = false;
      }
    }
  }

  if (AddTest) {
    M68k::CondCode MxCond = Inverted ? M68k::COND_EQ : M68k::COND_NE;
    CC = DAG.getConstant(MxCond, DL, MVT::i8);
    Cond = EmitTest(Cond, MxCond, DL, DAG);
  }
  return DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain, Dest, CC,
                     Cond);
}
2483
2484SDValue M68kTargetLowering::LowerADDC_ADDE_SUBC_SUBE(SDValue Op,
2485 SelectionDAG &DAG) const {
2486 MVT VT = Op.getNode()->getSimpleValueType(0);
2487
2488 // Let legalize expand this if it isn't a legal type yet.
2489 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
2490 return SDValue();
2491
2492 SDVTList VTs = DAG.getVTList(VT, MVT::i8);
2493
2494 unsigned Opc;
2495 bool ExtraOp = false;
2496 switch (Op.getOpcode()) {
2497 default:
2498 llvm_unreachable("Invalid code");
2499 case ISD::ADDC:
2500 Opc = M68kISD::ADD;
2501 break;
2502 case ISD::ADDE:
2503 Opc = M68kISD::ADDX;
2504 ExtraOp = true;
2505 break;
2506 case ISD::SUBC:
2507 Opc = M68kISD::SUB;
2508 break;
2509 case ISD::SUBE:
2510 Opc = M68kISD::SUBX;
2511 ExtraOp = true;
2512 break;
2513 }
2514
2515 if (!ExtraOp)
2516 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1));
2517 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1),
2518 Op.getOperand(2));
2519}
2520
2521// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the M68kISD::Wrapper node. Suppose N is
2523// one of the above mentioned nodes. It has to be wrapped because otherwise
2524// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
2525// be used to form addressing mode. These wrapped nodes will be selected
2526// into MOV32ri.
SDValue M68kTargetLowering::LowerConstantPool(SDValue Op,
                                              SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  // PC-relative references get the PC-based wrapper instead.
  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  MVT PtrVT = getPointerTy(DAG.getDataLayout());
      CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);

  SDLoc DL(CP);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  return Result;
}
2556
SDValue M68kTargetLowering::LowerExternalSymbol(SDValue Op,
                                                SelectionDAG &DAG) const {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyExternalReference(*Mod);

  // PC-relative references get the PC-based wrapper instead.
  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);

  SDLoc DL(Op);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  // For symbols that require a load from a stub to get the address, emit the
  // load.
  if (M68kII::isGlobalStubReference(OpFlag)) {
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
  }

  return Result;
}
2593
2594SDValue M68kTargetLowering::LowerBlockAddress(SDValue Op,
2595 SelectionDAG &DAG) const {
2596 unsigned char OpFlags = Subtarget.classifyBlockAddressReference();
2597 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2598 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
2599 SDLoc DL(Op);
2600 auto PtrVT = getPointerTy(DAG.getDataLayout());
2601
2602 // Create the TargetBlockAddressAddress node.
2603 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
2604
2605 if (M68kII::isPCRelBlockReference(OpFlags)) {
2606 Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
2607 } else {
2608 Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);
2609 }
2610
2611 // With PIC, the address is actually $g + Offset.
2612 if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
2613 Result =
2614 DAG.getNode(ISD::ADD, DL, PtrVT,
2615 DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
2616 }
2617
2618 return Result;
2619}
2620
SDValue M68kTargetLowering::LowerGlobalAddress(const GlobalValue *GV,
                                               const SDLoc &DL, int64_t Offset,
                                               SelectionDAG &DAG) const {
  unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
  if (M68kII::isDirectGlobalReference(OpFlags)) {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
    Offset = 0;
  } else {
    // Indirect references cannot fold the offset; it is applied below.
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
  }

  if (M68kII::isPCRelGlobalReference(OpFlags))
    Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
  else
    Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
    Result =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (M68kII::isGlobalStubReference(OpFlags)) {
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0) {
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, DL, PtrVT));
  }

  return Result;
}
2665
2666SDValue M68kTargetLowering::LowerGlobalAddress(SDValue Op,
2667 SelectionDAG &DAG) const {
2668 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2669 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
2670 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
2671}
2672
2673//===----------------------------------------------------------------------===//
2674// Custom Lower Jump Table
2675//===----------------------------------------------------------------------===//
2676
SDValue M68kTargetLowering::LowerJumpTable(SDValue Op,
                                           SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  // PC-relative references get the PC-based wrapper instead.
  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
  SDLoc DL(JT);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  return Result;
}
2704
  // Defer to the subtarget, which knows the preferred entry encoding.
  return Subtarget.getJumpTableEncoding();
}
2708
    const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
    unsigned uid, MCContext &Ctx) const {
  // NOTE(review): builds the MC-level expression for one jump-table entry;
  // presumably a symbol reference tied to MBB — confirm against full source.
      Ctx);
}
2715
                                                     SelectionDAG &DAG) const {

  // MachineJumpTableInfo::EK_LabelDifference32 entry
  // NOTE(review): for label-difference entries the table value itself serves
  // as the relocation base, so it is returned unchanged.
  return Table;
}
2725
// NOTE: This is only used for MachineJumpTableInfo::EK_LabelDifference32
// entries.
    const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const {
  // Use the jump table's own symbol as the base the entries are relative to.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}
2731
  // 'a'/'d' select address/data register classes; the single letters I..P
  // and the two-letter C0/Ci/Cj forms are M68k immediate constraints.
  if (Constraint.size() > 0) {
    switch (Constraint[0]) {
    case 'a':
    case 'd':
      return C_RegisterClass;
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
      return C_Immediate;
    case 'C':
      if (Constraint.size() == 2)
        switch (Constraint[1]) {
        case '0':
        case 'i':
        case 'j':
          return C_Immediate;
        default:
          break;
        }
      break;
    default:
      break;
    }
  }

  // Anything unrecognized is handled by the generic implementation.
  return TargetLowering::getConstraintType(Constraint);
}
2766
                                                      std::string &Constraint,
                                                      std::vector<SDValue> &Ops,
                                                      SelectionDAG &DAG) const {
  // Validated/lowered operand; left null if the constraint is not satisfied.
  SDValue Result;

  if (Constraint.size() == 1) {
    // Constant constraints
    switch (Constraint[0]) {
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P': {
      auto *C = dyn_cast<ConstantSDNode>(Op);
      if (!C)
        return;

      int64_t Val = C->getSExtValue();
      switch (Constraint[0]) {
      case 'I': // constant integer in the range [1,8]
        if (Val > 0 && Val <= 8)
          break;
        return;
      case 'J': // constant signed 16-bit integer
        if (isInt<16>(Val))
          break;
        return;
      case 'K': // constant that is NOT in the range of [-0x80, 0x80)
        if (Val < -0x80 || Val >= 0x80)
          break;
        return;
      case 'L': // constant integer in the range [-8,-1]
        if (Val < 0 && Val >= -8)
          break;
        return;
      case 'M': // constant that is NOT in the range of [-0x100, 0x100)
        if (Val < -0x100 || Val >= 0x100)
          break;
        return;
      case 'N': // constant integer in the range [24,31]
        if (Val >= 24 && Val <= 31)
          break;
        return;
      case 'O': // constant integer 16
        if (Val == 16)
          break;
        return;
      case 'P': // constant integer in the range [8,15]
        if (Val >= 8 && Val <= 15)
          break;
        return;
      default:
        llvm_unreachable("Unhandled constant constraint");
      }

      Result = DAG.getTargetConstant(Val, SDLoc(Op), Op.getValueType());
      break;
    }
    default:
      break;
    }
  }

  if (Constraint.size() == 2) {
    switch (Constraint[0]) {
    case 'C':
      // Constant constraints start with 'C'
      switch (Constraint[1]) {
      case '0':
      case 'i':
      case 'j': {
        auto *C = dyn_cast<ConstantSDNode>(Op);
        if (!C)
          break;

        int64_t Val = C->getSExtValue();
        switch (Constraint[1]) {
        case '0': // constant integer 0
          if (!Val)
            break;
          return;
        case 'i': // constant integer
          break;
        case 'j': // integer constant that doesn't fit in 16 bits
          if (!isInt<16>(C->getSExtValue()))
            break;
          return;
        default:
          llvm_unreachable("Unhandled constant constraint");
        }

        Result = DAG.getTargetConstant(Val, SDLoc(Op), Op.getValueType());
        break;
      }
      default:
        break;
      }
      break;
    default:
      break;
    }
  }

  // A satisfied constraint produced a target constant; hand it back.
  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Otherwise let the generic implementation have a try.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
2881
std::pair<unsigned, const TargetRegisterClass *>
                                                 StringRef Constraint,
                                                 MVT VT) const {
  // Map single-letter register constraints onto the register class that
  // matches the requested value type.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
    case 'd':
      // 'r'/'d': data registers, selected by width.
      switch (VT.SimpleTy) {
      case MVT::i8:
        return std::make_pair(0U, &M68k::DR8RegClass);
      case MVT::i16:
        return std::make_pair(0U, &M68k::DR16RegClass);
      case MVT::i32:
        return std::make_pair(0U, &M68k::DR32RegClass);
      default:
        break;
      }
      break;
    case 'a':
      // 'a': address registers (no 8-bit form exists).
      switch (VT.SimpleTy) {
      case MVT::i16:
        return std::make_pair(0U, &M68k::AR16RegClass);
      case MVT::i32:
        return std::make_pair(0U, &M68k::AR32RegClass);
      default:
        break;
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
2918
/// Determines whether the callee is required to pop its own arguments.
/// Callee pop is necessary to support tail calls.
bool M68k::isCalleePop(CallingConv::ID CallingConv, bool IsVarArg,
                       bool GuaranteeTCO) {
  // No M68k calling convention currently uses callee-cleanup, so the
  // parameters are ignored and the caller always pops the arguments.
  return false;
}
2925
// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
// together with other CMOV pseudo-opcodes into a single basic-block with
// conditional jump around it.
  switch (MI.getOpcode()) {
  // The 8/16-bit data-register and 32-bit register CMOV pseudos.
  case M68k::CMOV8d:
  case M68k::CMOV16d:
  case M68k::CMOV32r:
    return true;

  default:
    return false;
  }
}
2940
// The CCR operand of SelectItr might be missing a kill marker
// because there were multiple uses of CCR, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
                                  const TargetRegisterInfo *TRI) {
  // Scan forward through BB for a use/def of CCR.
  MachineBasicBlock::iterator miI(std::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr &mi = *miI;
    // A later read means CCR is live past SelectItr: no kill flag.
    if (mi.readsRegister(M68k::CCR))
      return false;
    if (mi.definesRegister(M68k::CCR))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether CCR is live into a
  // successor.
  if (miI == BB->end())
    for (const auto *SBB : BB->successors())
      if (SBB->isLiveIn(M68k::CCR))
        return false;

  // We found a def, or hit the end of the basic block and CCR wasn't live
  // out. SelectMI should have a kill flag on CCR.
  SelectItr->addRegisterKilled(M68k::CCR, TRI);
  return true;
}
2971
M68kTargetLowering::EmitLoweredSelect(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *BB = MBB->getBasicBlock();

  // ThisMBB:
  // ...
  // TrueVal = ...
  // cmp ccX, r1, r2
  // bcc Copy1MBB
  // fallthrough --> Copy0MBB
  MachineBasicBlock *ThisMBB = MBB;

  // This code lowers all pseudo-CMOV instructions. Generally it lowers these
  // as described above, by inserting a MBB, and then making a PHI at the join
  // point to select the true and false operands of the CMOV in the PHI.
  //
  // The code also handles two different cases of multiple CMOV opcodes
  // in a row.
  //
  // Case 1:
  // In this case, there are multiple CMOVs in a row, all which are based on
  // the same condition setting (or the exact opposite condition setting).
  // In this case we can lower all the CMOVs using a single inserted MBB, and
  // then make a number of PHIs at the join point to model the CMOVs. The only
  // trickiness here, is that in a case like:
  //
  // t2 = CMOV cond1 t1, f1
  // t3 = CMOV cond1 t2, f2
  //
  // when rewriting this into PHIs, we have to perform some renaming on the
  // temps since you cannot have a PHI operand refer to a PHI result earlier
  // in the same block. The "simple" but wrong lowering would be:
  //
  // t2 = PHI t1(BB1), f1(BB2)
  // t3 = PHI t2(BB1), f2(BB2)
  //
  // but clearly t2 is not defined in BB1, so that is incorrect. The proper
  // renaming is to note that on the path through BB1, t2 is really just a
  // copy of t1, and do that renaming, properly generating:
  //
  // t2 = PHI t1(BB1), f1(BB2)
  // t3 = PHI t1(BB1), f2(BB2)
  //
  // Case 2, we lower cascaded CMOVs such as
  //
  // (CMOV (CMOV F, T, cc1), T, cc2)
  //
  // to two successives branches.
  MachineInstr *CascadedCMOV = nullptr;
  MachineInstr *LastCMOV = &MI;
  // Operand 3 of a CMOV pseudo carries the condition-code immediate.
  M68k::CondCode CC = M68k::CondCode(MI.getOperand(3).getImm());
      std::next(MachineBasicBlock::iterator(MI));

  // Check for case 1, where there are multiple CMOVs with the same condition
  // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
  // number of jumps the most.

  if (isCMOVPseudo(MI)) {
    // See if we have a string of CMOVS with the same condition.
    // Opposite conditions are allowed too: the PHI operands get swapped below.
    while (NextMIIt != MBB->end() && isCMOVPseudo(*NextMIIt) &&
           (NextMIIt->getOperand(3).getImm() == CC ||
            NextMIIt->getOperand(3).getImm() == OppCC)) {
      LastCMOV = &*NextMIIt;
      ++NextMIIt;
    }
  }

  // This checks for case 2, but only do this if we didn't already find
  // case 1, as indicated by LastCMOV == MI.
  // NOTE(review): the next CMOV's operand 1 must be the result of this one
  // and be killed there, otherwise the cascaded lowering is not applicable.
  if (LastCMOV == &MI && NextMIIt != MBB->end() &&
      NextMIIt->getOpcode() == MI.getOpcode() &&
      NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
      NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
      NextMIIt->getOperand(1).isKill()) {
    CascadedCMOV = &*NextMIIt;
  }

  MachineBasicBlock *Jcc1MBB = nullptr;

  // If we have a cascaded CMOV, we lower it to two successive branches to
  // the same block. CCR is used by both, so mark it as live in the second.
  if (CascadedCMOV) {
    Jcc1MBB = F->CreateMachineBasicBlock(BB);
    F->insert(It, Jcc1MBB);
    Jcc1MBB->addLiveIn(M68k::CCR);
  }

  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
  F->insert(It, Copy0MBB);
  F->insert(It, SinkMBB);

  // If the CCR register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineInstr *LastCCRSUser = CascadedCMOV ? CascadedCMOV : LastCMOV;
  if (!LastCCRSUser->killsRegister(M68k::CCR) &&
      !checkAndUpdateCCRKill(LastCCRSUser, MBB, TRI)) {
    Copy0MBB->addLiveIn(M68k::CCR);
    SinkMBB->addLiveIn(M68k::CCR);
  }

  // Transfer the remainder of MBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());

  // Add the true and fallthrough blocks as its successors.
  if (CascadedCMOV) {
    // The fallthrough block may be Jcc1MBB, if we have a cascaded CMOV.
    MBB->addSuccessor(Jcc1MBB);

    // In that case, Jcc1MBB will itself fallthrough the Copy0MBB, and
    // jump to the SinkMBB.
    Jcc1MBB->addSuccessor(Copy0MBB);
    Jcc1MBB->addSuccessor(SinkMBB);
  } else {
    MBB->addSuccessor(Copy0MBB);
  }

  // The true block target of the first (or only) branch is always SinkMBB.
  MBB->addSuccessor(SinkMBB);

  // Create the conditional branch instruction.
  unsigned Opc = M68k::GetCondBranchFromCond(CC);
  BuildMI(MBB, DL, TII->get(Opc)).addMBB(SinkMBB);

  if (CascadedCMOV) {
    // Second branch tests the second CMOV's own condition code.
    unsigned Opc2 = M68k::GetCondBranchFromCond(
        (M68k::CondCode)CascadedCMOV->getOperand(3).getImm());
    BuildMI(Jcc1MBB, DL, TII->get(Opc2)).addMBB(SinkMBB);
  }

  // Copy0MBB:
  // %FalseValue = ...
  // # fallthrough to SinkMBB
  Copy0MBB->addSuccessor(SinkMBB);

  // SinkMBB:
  // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
  // ...
      std::next(MachineBasicBlock::iterator(LastCMOV));
  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // As we are creating the PHIs, we have to be careful if there is more than
  // one. Later CMOVs may reference the results of earlier CMOVs, but later
  // PHIs have to reference the individual true/false inputs from earlier PHIs.
  // That also means that PHI construction must work forward from earlier to
  // later, and that the code must maintain a mapping from earlier PHI's
  // destination registers, and the registers that went into the PHI.

  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
    Register DestReg = MIIt->getOperand(0).getReg();
    Register Op1Reg = MIIt->getOperand(1).getReg();
    Register Op2Reg = MIIt->getOperand(2).getReg();

    // If this CMOV we are generating is the opposite condition from
    // the jump we generated, then we have to swap the operands for the
    // PHI that is going to be generated.
    if (MIIt->getOperand(3).getImm() == OppCC)
      std::swap(Op1Reg, Op2Reg);

    // Rewrite references to earlier PHI results into the values those PHIs
    // would carry along the corresponding edge (see renaming note above).
    if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
      Op1Reg = RegRewriteTable[Op1Reg].first;

    if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
      Op2Reg = RegRewriteTable[Op2Reg].second;

    MIB =
        BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(M68k::PHI), DestReg)
            .addReg(Op1Reg)
            .addMBB(Copy0MBB)
            .addReg(Op2Reg)
            .addMBB(ThisMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }

  // If we have a cascaded CMOV, the second Jcc provides the same incoming
  // value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
  if (CascadedCMOV) {
    MIB.addReg(MI.getOperand(2).getReg()).addMBB(Jcc1MBB);
    // Copy the PHI result to the register defined by the second CMOV.
    BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
            DL, TII->get(TargetOpcode::COPY),
            CascadedCMOV->getOperand(0).getReg())
        .addReg(MI.getOperand(0).getReg());
    CascadedCMOV->eraseFromParent();
  }

  // Now remove the CMOV(s).
  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;)
    (MIIt++)->eraseFromParent();

  return SinkMBB;
}
3186
M68kTargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
                                         MachineBasicBlock *BB) const {
  // Segmented-stack allocation (SALLOCA) is not implemented for M68k; the
  // DAG lowering may emit SEG_ALLOCA, but expanding it here is unsupported.
  llvm_unreachable("Cannot lower Segmented Stack Alloca with stack-split on");
}
3192
                                               MachineBasicBlock *BB) const {
  // Dispatch pseudo-instructions flagged with 'usesCustomInserter' to their
  // expansion routines.
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  // All CMOV pseudos (8/16-bit data regs, 32-bit) share the branch-diamond
  // lowering.
  case M68k::CMOV8d:
  case M68k::CMOV16d:
  case M68k::CMOV32r:
    return EmitLoweredSelect(MI, BB);
  case M68k::SALLOCA:
    return EmitLoweredSegAlloca(MI, BB);
  }
}
3207
SDValue M68kTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  auto PtrVT = getPointerTy(MF.getDataLayout());

  // Operand 2 carries the IR Value of the va_list pointer, used only for the
  // MachinePointerInfo of the emitted store.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
3222
// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
// NOTE(review): the _alloca/4K-probe text above appears inherited from the
// x86 lowering; the code below only emits SEG_ALLOCA or a direct SP adjust.
SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                    SelectionDAG &DAG) const {
  bool SplitStack = MF.shouldSplitStack();

  SDLoc DL(Op);

  // Get the inputs.
  SDNode *Node = Op.getNode();
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  EVT VT = Node->getValueType(0);

  // Chain the dynamic stack allocation so that it doesn't modify the stack
  // pointer when other instructions are using the stack.
  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);

  SDValue Result;
  if (SplitStack) {
    // Split-stack path: copy the requested size into a fresh vreg and emit a
    // SEG_ALLOCA node, expanded later by the custom inserter.
    auto &MRI = MF.getRegInfo();
    auto SPTy = getPointerTy(DAG.getDataLayout());
    auto *ARClass = getRegClassFor(SPTy);
    Register Vreg = MRI.createVirtualRegister(ARClass);
    Chain = DAG.getCopyToReg(Chain, DL, Vreg, Size);
    Result = DAG.getNode(M68kISD::SEG_ALLOCA, DL, SPTy, Chain,
                         DAG.getRegister(Vreg, SPTy));
  } else {
    auto &TLI = DAG.getTargetLoweringInfo();
    assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
                    " not tell us which reg is the stack pointer!");

    SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT);
    Chain = SP.getValue(1);
    const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
    unsigned StackAlign = TFI.getStackAlignment();
    // Stack grows downwards: allocate by subtracting Size from SP.
    Result = DAG.getNode(ISD::SUB, DL, VT, SP, Size); // Value
    // Over-align the result by masking down when the request exceeds the
    // default stack alignment (-Align is an all-ones mask above log2(Align)).
    if (Align > StackAlign)
      Result = DAG.getNode(ISD::AND, DL, VT, Result,
                           DAG.getConstant(-(uint64_t)Align, DL, VT));
    Chain = DAG.getCopyToReg(Chain, DL, SPReg, Result); // Output chain
  }

  Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), DL);

  SDValue Ops[2] = {Result, Chain};
  return DAG.getMergeValues(Ops, DL);
}
3277
3278SDValue M68kTargetLowering::LowerShiftLeftParts(SDValue Op,
3279 SelectionDAG &DAG) const {
3280 SDLoc DL(Op);
3281 SDValue Lo = Op.getOperand(0);
3282 SDValue Hi = Op.getOperand(1);
3283 SDValue Shamt = Op.getOperand(2);
3284 EVT VT = Lo.getValueType();
3285
3286 // if Shamt - register size < 0: // Shamt < register size
3287 // Lo = Lo << Shamt
3288 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (register size - 1 ^ Shamt))
3289 // else:
3290 // Lo = 0
3291 // Hi = Lo << (Shamt - register size)
3292
3293 SDValue Zero = DAG.getConstant(0, DL, VT);
3294 SDValue One = DAG.getConstant(1, DL, VT);
3295 SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT);
3296 SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
3297 SDValue ShamtMinusRegisterSize =
3298 DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
3299 SDValue RegisterSizeMinus1Shamt =
3300 DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);
3301
3302 SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3303 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3304 SDValue ShiftRightLo =
3305 DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, RegisterSizeMinus1Shamt);
3306 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3307 SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3308 SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusRegisterSize);
3309
3310 SDValue CC =
3311 DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);
3312
3313 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3314 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3315
3316 return DAG.getMergeValues({Lo, Hi}, DL);
3317}
3318
3319SDValue M68kTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3320 bool IsSRA) const {
3321 SDLoc DL(Op);
3322 SDValue Lo = Op.getOperand(0);
3323 SDValue Hi = Op.getOperand(1);
3324 SDValue Shamt = Op.getOperand(2);
3325 EVT VT = Lo.getValueType();
3326
3327 // SRA expansion:
3328 // if Shamt - register size < 0: // Shamt < register size
3329 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (register size - 1 ^ Shamt))
3330 // Hi = Hi >>s Shamt
3331 // else:
3332 // Lo = Hi >>s (Shamt - register size);
3333 // Hi = Hi >>s (register size - 1)
3334 //
3335 // SRL expansion:
3336 // if Shamt - register size < 0: // Shamt < register size
3337 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (register size - 1 ^ Shamt))
3338 // Hi = Hi >>u Shamt
3339 // else:
3340 // Lo = Hi >>u (Shamt - register size);
3341 // Hi = 0;
3342
3343 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3344
3345 SDValue Zero = DAG.getConstant(0, DL, VT);
3346 SDValue One = DAG.getConstant(1, DL, VT);
3347 SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT);
3348 SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
3349 SDValue ShamtMinusRegisterSize =
3350 DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
3351 SDValue RegisterSizeMinus1Shamt =
3352 DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);
3353
3354 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3355 SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3356 SDValue ShiftLeftHi =
3357 DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, RegisterSizeMinus1Shamt);
3358 SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3359 SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3360 SDValue LoFalse =
3361 DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusRegisterSize);
3362 SDValue HiFalse =
3363 IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, RegisterSizeMinus1) : Zero;
3364
3365 SDValue CC =
3366 DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);
3367
3368 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3369 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3370
3371 return DAG.getMergeValues({Lo, Hi}, DL);
3372}
3373
3374//===----------------------------------------------------------------------===//
3375// DAG Combine
3376//===----------------------------------------------------------------------===//
3377
                          SelectionDAG &DAG) {
  // Materialize an M68k-specific SETCC node: an i8 value holding the result
  // of testing condition `Cond` against the flags value `CCR`.
  return DAG.getNode(M68kISD::SETCC, dl, MVT::i8,
                     DAG.getConstant(Cond, dl, MVT::i8), CCR);
}
// When legalizing carry, we create carries via add X, -1
// If that comes from an actual carry, via setcc, we use the
// carry directly.
  if (CCR.getOpcode() == M68kISD::ADD) {
    if (isAllOnesConstant(CCR.getOperand(1))) {
      SDValue Carry = CCR.getOperand(0);
      // Peel off wrappers that preserve bit 0 of the carry: truncation, any
      // extension, and 'and x, 1'.
      while (Carry.getOpcode() == ISD::TRUNCATE ||
             Carry.getOpcode() == ISD::ZERO_EXTEND ||
             Carry.getOpcode() == ISD::SIGN_EXTEND ||
             Carry.getOpcode() == ISD::ANY_EXTEND ||
             (Carry.getOpcode() == ISD::AND &&
              isOneConstant(Carry.getOperand(1))))
        Carry = Carry.getOperand(0);
      if (Carry.getOpcode() == M68kISD::SETCC ||
          Carry.getOpcode() == M68kISD::SETCC_CARRY) {
        // Only a carry-set test (COND_CS) lets us forward the SETCC's own
        // flags operand directly.
        if (Carry.getConstantOperandVal(0) == M68k::COND_CS)
          return Carry.getOperand(1);
      }
    }
  }

  return SDValue();
}
3407
/// Optimize a CCR definition used according to the condition code \p CC into
/// a simpler CCR value, potentially returning a new \p CC and replacing uses
/// of chain values.
                                SelectionDAG &DAG,
                                const M68kSubtarget &Subtarget) {
  // A carry test (COND_CS) of an 'add x, -1' carry materialization can use
  // the original flags producer directly.
  if (CC == M68k::COND_CS)
    if (SDValue Flags = combineCarryThroughADD(CCR))
      return Flags;

  return SDValue();
}
3420
// Optimize RES = M68kISD::SETCC CONDCODE, CCR_INPUT
                                 const M68kSubtarget &Subtarget) {
  SDLoc DL(N);
  M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(0));
  SDValue CCR = N->getOperand(1);

  // Try to simplify the CCR and condition code operands.
  if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget))
    // Rebuild the SETCC on the simplified flags; CC may have been updated
    // in place by combineSetCCCCR.
    return getSETCC(CC, Flags, DL, DAG);

  return SDValue();
}
                                  const M68kSubtarget &Subtarget) {
  SDLoc DL(N);
  // BRCOND operands: chain, target BB, condition-code imm, CCR flags.
  M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(2));
  SDValue CCR = N->getOperand(3);

  // Try to simplify the CCR and condition code operands.
  // Make sure to not keep references to operands, as combineSetCCCCR can
  // RAUW them under us.
  if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget)) {
    return DAG.getNode(M68kISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
                       N->getOperand(1), Cond, Flags);
  }

  return SDValue();
}
3451
  // If the borrow-in is an 'add x, -1' carry materialization, rebuild SUBX
  // with the original flags producer as its carry operand.
  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(M68kISD::SUBX, SDLoc(N), VTs, N->getOperand(0),
                       N->getOperand(1), Flags);
  }

  return SDValue();
}
3462
// Optimize RES, CCR = M68kISD::ADDX LHS, RHS, CCR
  // If the carry-in is an 'add x, -1' carry materialization, rebuild ADDX
  // with the original flags producer as its carry operand.
  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(M68kISD::ADDX, SDLoc(N), VTs, N->getOperand(0),
                       N->getOperand(1), Flags);
  }

  return SDValue();
}
3475
3476SDValue M68kTargetLowering::PerformDAGCombine(SDNode *N,
3477 DAGCombinerInfo &DCI) const {
3478 SelectionDAG &DAG = DCI.DAG;
3479 switch (N->getOpcode()) {
3480 case M68kISD::SUBX:
3481 return combineSUBX(N, DAG);
3482 case M68kISD::ADDX:
3483 return combineADDX(N, DAG, DCI);
3484 case M68kISD::SETCC:
3485 return combineM68kSetCC(N, DAG, Subtarget);
3486 case M68kISD::BRCOND:
3487 return combineM68kBrCond(N, DAG, Subtarget);
3488 }
3489
3490 return SDValue();
3491}
3492
3493//===----------------------------------------------------------------------===//
3494// M68kISD Node Names
3495//===----------------------------------------------------------------------===//
const char *M68kTargetLowering::getTargetNodeName(unsigned Opcode) const {
  // Map each M68k-specific DAG opcode to a printable name for debug dumps.
  switch (Opcode) {
  case M68kISD::CALL:
    return "M68kISD::CALL";
  case M68kISD::TAIL_CALL:
    return "M68kISD::TAIL_CALL";
  case M68kISD::RET:
    return "M68kISD::RET";
  case M68kISD::TC_RETURN:
    return "M68kISD::TC_RETURN";
  case M68kISD::ADD:
    return "M68kISD::ADD";
  case M68kISD::SUB:
    return "M68kISD::SUB";
  case M68kISD::ADDX:
    return "M68kISD::ADDX";
  case M68kISD::SUBX:
    return "M68kISD::SUBX";
  case M68kISD::SMUL:
    return "M68kISD::SMUL";
  case M68kISD::UMUL:
    return "M68kISD::UMUL";
  case M68kISD::OR:
    return "M68kISD::OR";
  case M68kISD::XOR:
    return "M68kISD::XOR";
  case M68kISD::AND:
    return "M68kISD::AND";
  case M68kISD::CMP:
    return "M68kISD::CMP";
  case M68kISD::BTST:
    return "M68kISD::BTST";
  case M68kISD::SELECT:
    return "M68kISD::SELECT";
  case M68kISD::CMOV:
    return "M68kISD::CMOV";
  case M68kISD::BRCOND:
    return "M68kISD::BRCOND";
  case M68kISD::SETCC:
    return "M68kISD::SETCC";
    return "M68kISD::SETCC_CARRY";
    return "M68kISD::GLOBAL_BASE_REG";
  case M68kISD::Wrapper:
    return "M68kISD::Wrapper";
  case M68kISD::WrapperPC:
    return "M68kISD::WrapperPC";
    return "M68kISD::SEG_ALLOCA";
  // Unknown opcodes get no name.
  default:
    return NULL;
  }
}
3550
                                               bool IsVarArg) const {
  // Select the tablegen'erated assignment function for the M68k C calling
  // convention: one table for return values, one for arguments. IsVarArg is
  // not consulted here.
  if (Return)
    return RetCC_M68k_C;
  else
    return CC_M68k_C;
}
unsigned const MachineRegisterInfo * MRI
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls)
Return true if the calling convention is one that we can guarantee TCO for.
static bool mayTailCallThisCC(CallingConv::ID CC)
Return true if we might ever do TCO for calls with this calling convention.
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu Simplify well known AMD library false FunctionCallee Callee
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const TargetInstrInfo *TII)
MatchingStackOffset - Return true if the given stack call argument is already available in the same p...
SmallVector< MachineOperand, 4 > Cond
return RetTy
uint64_t Size
const HexagonInstrInfo * TII
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
IRTranslator LLVM IR MI
static LVOptions Options
Definition: LVOptions.cpp:25
This file contains the custom routines for the M68k Calling Convention that aren't done by tablegen.
static SDValue LowerTruncateToBTST(SDValue Op, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
static SDValue combineADDX(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI)
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc)
Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes each of which has no other use...
static bool hasNonFlagsUse(SDValue Op)
return true if Op has a use that doesn't just read flags.
static bool isM68kCCUnsigned(unsigned M68kCC)
Return true if the condition is an unsigned comparison operation.
static StructReturnType callIsStructReturn(const SmallVectorImpl< ISD::OutputArg > &Outs)
static bool isXor1OfSetCC(SDValue Op)
Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the SETCC node has a single use...
static SDValue LowerAndToBTST(SDValue And, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
Result of 'and' is compared against zero. Change to a BTST node if possible.
static SDValue combineM68kBrCond(SDNode *N, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
static M68k::CondCode TranslateIntegerM68kCC(ISD::CondCode SetCCOpcode)
static SDValue getSETCC(M68k::CondCode Cond, SDValue CCR, const SDLoc &dl, SelectionDAG &DAG)
static StructReturnType argsAreStructReturn(const SmallVectorImpl< ISD::InputArg > &Ins)
Determines whether a function uses struct return semantics.
static bool isCMOVPseudo(MachineInstr &MI)
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt)
Return true if the function is being made into a tailcall target by changing its ABI.
static bool isM68kLogicalCmp(SDValue Op)
Return true if opcode is a M68k logical comparison.
static SDValue combineM68kSetCC(SDNode *N, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
static SDValue combineSetCCCCR(SDValue CCR, M68k::CondCode &CC, SelectionDAG &DAG, const M68kSubtarget &Subtarget)
Optimize a CCR definition used according to the condition code CC into a simpler CCR value,...
static SDValue combineCarryThroughADD(SDValue CCR)
static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG)
Create a BTST (Bit Test) node - Test bit BitNo in Src and set condition according to equal/not-equal ...
StructReturnType
@ NotStructReturn
@ RegStructReturn
@ StackStructReturn
static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG)
static bool checkAndUpdateCCRKill(MachineBasicBlock::iterator SelectItr, MachineBasicBlock *BB, const TargetRegisterInfo *TRI)
static SDValue combineSUBX(SDNode *N, SelectionDAG &DAG)
static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL, bool IsFP, SDValue &LHS, SDValue &RHS, SelectionDAG &DAG)
Do a one-to-one translation of a ISD::CondCode to the M68k-specific condition code,...
This file defines the interfaces that M68k uses to lower LLVM code into a selection DAG.
This file declares the M68k specific subclass of MachineFunctionInfo.
This file declares the M68k specific subclass of TargetSubtargetInfo.
This file declares the M68k specific subclass of TargetMachine.
This file contains declarations for M68k ELF object file lowering.
#define F(x, y, z)
Definition: MD5.cpp:55
#define G(x, y, z)
Definition: MD5.cpp:56
unsigned const TargetRegisterInfo * TRI
#define T1
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:75
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:214
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:279
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:718
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
The address of a basic block.
Definition: Constants.h:875
CCState - This class holds information needed while lowering arguments and return values.
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
unsigned getLocMemOffset() const
Register getLocReg() const
LocInfo getLocInfo() const
bool isMemLoc() const
bool isExtInLoc() const
unsigned getValNo() const
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:114
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
iterator end()
Definition: DenseMap.h:84
iterator_range< arg_iterator > args()
Definition: Function.h:790
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:666
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:641
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:237
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:625
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:274
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:652
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
void setBytesToPopOnReturn(unsigned bytes)
void setArgumentStackSize(unsigned size)
unsigned char classifyExternalReference(const Module &M) const
Classify a external variable reference for the current subtarget according to how we should reference...
unsigned char classifyBlockAddressReference() const
Classify a blockaddress reference for the current subtarget according to how we should reference it i...
unsigned getSlotSize() const
getSlotSize - Stack slot size in bytes.
const M68kInstrInfo * getInstrInfo() const override
unsigned char classifyGlobalReference(const GlobalValue *GV, const Module &M) const
Classify a global variable reference for the current subtarget according to how we should reference i...
unsigned getJumpTableEncoding() const
unsigned char classifyLocalReference(const GlobalValue *GV) const
Classify a global variable reference for the current subtarget according to how we should reference i...
const M68kRegisterInfo * getRegisterInfo() const override
bool atLeastM68020() const
Definition: M68kSubtarget.h:86
unsigned char classifyGlobalFunctionReference(const GlobalValue *GV, const Module &M) const
Classify a global function reference for the current subtarget.
const M68kFrameLowering * getFrameLowering() const override
ConstraintType getConstraintType(StringRef ConstraintStr) const override
Given a constraint, return the type of constraint it is for this target.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
EVT is not used in-tree, but is used by out-of-tree target.
const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned uid, MCContext &Ctx) const override
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
CCAssignFn * getCCAssignFn(CallingConv::ID CC, bool Return, bool IsVarArg) const
M68kTargetLowering(const M68kTargetMachine &TM, const M68kSubtarget &STI)
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the value type to use for ISD::SETCC.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
Context object for machine code objects.
Definition: MCContext.h:76
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:386
Machine Value Type.
SimpleValueType SimpleTy
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setObjectZExt(int ObjectIdx, bool IsZExt)
void setObjectSExt(int ObjectIdx, bool IsSExt)
void setHasTailCall(bool V=true)
bool isObjectZExt(int ObjectIdx) const
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isObjectSExt(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
Definition: MachineInstr.h:68
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:526
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
int64_t getImm() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:71
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
const SDValue & getOperand(unsigned Num) const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:717
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:474
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:727
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:468
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:469
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:769
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:671
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:764
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:465
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:795
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVMContext * getContext() const
Definition: SelectionDAG.h:481
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:733
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:550
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetOptions Options
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
use_iterator use_begin()
Definition: Value.h:360
self_iterator getIterator()
Definition: ilist_node.h:82
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ M68k_INTR
Used for M68k interrupt routines.
Definition: CallingConv.h:236
@ Swift
Calling convention for Swift.
Definition: CallingConv.h:69
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:736
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition: ISDOpcodes.h:236
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1056
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1052
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:250
@ ATOMIC_LOAD_NAND
Definition: ISDOpcodes.h:1193
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:700
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1085
@ ATOMIC_LOAD_MAX
Definition: ISDOpcodes.h:1195
@ ATOMIC_LOAD_UMIN
Definition: ISDOpcodes.h:1196
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:269
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:766
@ GlobalAddress
Definition: ISDOpcodes.h:78