//===-- VEISelLowering.cpp - VE DAG Lowering Implementation ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that VE uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "VEISelLowering.h"
#include "MCTargetDesc/VEMCExpr.h"
#include "VECustomDAG.h"
#include "VEInstrBuilder.h"
#include "VEMachineFunctionInfo.h"
#include "VERegisterInfo.h"
#include "VETargetMachine.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

#define DEBUG_TYPE "ve-lower"

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "VEGenCallingConv.inc"

CCAssignFn *getReturnCC(CallingConv::ID CallConv) {
  switch (CallConv) {
  default:
    return RetCC_VE_C;
  case CallingConv::Fast:
    return RetCC_VE_Fast;
  }
}

CCAssignFn *getParamCC(CallingConv::ID CallConv, bool IsVarArg) {
  if (IsVarArg)
    return CC_VE2;
  switch (CallConv) {
  default:
    return CC_VE_C;
  case CallingConv::Fast:
    return CC_VE_Fast;
  }
}

bool VETargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  CCAssignFn *RetCC = getReturnCC(CallConv);
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC);
}

static const MVT AllVectorVTs[] = {MVT::v256i32, MVT::v512i32, MVT::v256i64,
                                   MVT::v256f32, MVT::v512f32, MVT::v256f64};

static const MVT AllMaskVTs[] = {MVT::v256i1, MVT::v512i1};

static const MVT AllPackedVTs[] = {MVT::v512i32, MVT::v512f32};
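// Note: the "packed" types above hold two 32-bit values per 64-bit
// vector-lane element, so v512i32/v512f32 still fit the 256-element V64
// registers.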

void VETargetLowering::initRegisterClasses() {
  // Set up the register classes.
  addRegisterClass(MVT::i32, &VE::I32RegClass);
  addRegisterClass(MVT::i64, &VE::I64RegClass);
  addRegisterClass(MVT::f32, &VE::F32RegClass);
  addRegisterClass(MVT::f64, &VE::I64RegClass);
  addRegisterClass(MVT::f128, &VE::F128RegClass);

  if (Subtarget->enableVPU()) {
    for (MVT VecVT : AllVectorVTs)
      addRegisterClass(VecVT, &VE::V64RegClass);
    addRegisterClass(MVT::v256i1, &VE::VMRegClass);
    addRegisterClass(MVT::v512i1, &VE::VM512RegClass);
  }
}

void VETargetLowering::initSPUActions() {
  const auto &TM = getTargetMachine();
  /// Load & Store {

  // VE doesn't have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setTruncStoreAction(VT, MVT::i1, Expand);
  }

  // VE doesn't have floating point extload/truncstore, so expand them.
  for (MVT FPVT : MVT::fp_valuetypes()) {
    for (MVT OtherFPVT : MVT::fp_valuetypes()) {
      setLoadExtAction(ISD::EXTLOAD, FPVT, OtherFPVT, Expand);
      setTruncStoreAction(FPVT, OtherFPVT, Expand);
    }
  }

  // VE doesn't have fp128 load/store, so expand them in custom lower.
  setOperationAction(ISD::LOAD, MVT::f128, Custom);
  setOperationAction(ISD::STORE, MVT::f128, Custom);

  /// } Load & Store

  // Custom legalize address nodes into LO/HI parts.
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  /// VAARG handling {
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  // VAARG needs to be lowered to access with 8 bytes alignment.
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  // Use the default implementation.
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  /// } VAARG handling

  /// Stack {
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);

  // Use the default implementation.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  /// } Stack

  /// Branch {

  // VE doesn't have BRCOND
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // BR_JT is not implemented yet.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  /// } Branch

  /// Int Ops {
  for (MVT IntVT : {MVT::i32, MVT::i64}) {
    // VE has no REM or DIVREM operations.
    setOperationAction(ISD::UREM, IntVT, Expand);
    setOperationAction(ISD::SREM, IntVT, Expand);
    setOperationAction(ISD::SDIVREM, IntVT, Expand);
    setOperationAction(ISD::UDIVREM, IntVT, Expand);

    // VE has no SHL_PARTS/SRA_PARTS/SRL_PARTS operations.
    setOperationAction(ISD::SHL_PARTS, IntVT, Expand);
    setOperationAction(ISD::SRA_PARTS, IntVT, Expand);
    setOperationAction(ISD::SRL_PARTS, IntVT, Expand);

    // VE has no MULHU/S or U/SMUL_LOHI operations.
    // TODO: Use MPD instruction to implement SMUL_LOHI for i32 type.
    setOperationAction(ISD::MULHU, IntVT, Expand);
    setOperationAction(ISD::MULHS, IntVT, Expand);
    setOperationAction(ISD::UMUL_LOHI, IntVT, Expand);
    setOperationAction(ISD::SMUL_LOHI, IntVT, Expand);

    // VE has no CTTZ, ROTL, ROTR operations.
    setOperationAction(ISD::CTTZ, IntVT, Expand);
    setOperationAction(ISD::ROTL, IntVT, Expand);
    setOperationAction(ISD::ROTR, IntVT, Expand);

    // VE has a 64-bit instruction which works as i64 BSWAP operation. This
    // instruction works fine as i32 BSWAP operation with an additional
    // parameter. Use isel patterns to lower BSWAP.
    setOperationAction(ISD::BSWAP, IntVT, Legal);

    // VE has only 64-bit instructions which work as i64 BITREVERSE/CTLZ/CTPOP
    // operations. Use isel patterns for i64, promote for i32.
    LegalizeAction Act = (IntVT == MVT::i32) ? Promote : Legal;
    setOperationAction(ISD::BITREVERSE, IntVT, Act);
    setOperationAction(ISD::CTLZ, IntVT, Act);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, IntVT, Act);
    setOperationAction(ISD::CTPOP, IntVT, Act);

    // VE has only 64-bit instructions which work as i64 AND/OR/XOR operations.
    // Use isel patterns for i64, promote for i32.
    setOperationAction(ISD::AND, IntVT, Act);
    setOperationAction(ISD::OR, IntVT, Act);
    setOperationAction(ISD::XOR, IntVT, Act);

    // Legal smax and smin
    setOperationAction(ISD::SMAX, IntVT, Legal);
    setOperationAction(ISD::SMIN, IntVT, Legal);
  }
  /// } Int Ops

  /// Conversion {
  // VE doesn't have instructions for fp<->uint, so expand them by llvm.
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote); // use i64
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote); // use i64
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // fp16 not supported.
  for (MVT FPVT : MVT::fp_valuetypes()) {
    setOperationAction(ISD::FP16_TO_FP, FPVT, Expand);
    setOperationAction(ISD::FP_TO_FP16, FPVT, Expand);
  }
  /// } Conversion

  /// Floating-point Ops {
  /// Note: Floating-point operations are fneg, fadd, fsub, fmul, fdiv, frem,
  /// and fcmp.

  // VE doesn't have the following floating point operations.
  for (MVT VT : MVT::fp_valuetypes()) {
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
  }

  // VE doesn't have fdiv of f128.
  setOperationAction(ISD::FDIV, MVT::f128, Expand);

  for (MVT FPVT : {MVT::f32, MVT::f64}) {
    // f32 and f64 use ConstantFP; f128 uses ConstantPool.
    setOperationAction(ISD::ConstantFP, FPVT, Legal);
  }
  /// } Floating-point Ops

  /// Floating-point math functions {

  // VE doesn't have the following floating point math functions.
  for (MVT VT : MVT::fp_valuetypes()) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
  }

  // VE has single and double FMINNUM and FMAXNUM.
  for (MVT VT : {MVT::f32, MVT::f64}) {
    setOperationAction({ISD::FMAXNUM, ISD::FMINNUM}, VT, Legal);
  }

  /// } Floating-point math functions

  /// Atomic instructions {

  setMaxAtomicSizeInBitsSupported(64);
  setMinCmpXchgSizeInBits(32);
  setSupportsUnalignedAtomics(false);

  // Use custom inserter for ATOMIC_FENCE.
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Other atomic instructions.
  for (MVT VT : MVT::integer_valuetypes()) {
    // Support i8/i16 atomic swap.
    setOperationAction(ISD::ATOMIC_SWAP, VT, Custom);

    // FIXME: Support "atmam" instructions.
    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Expand);

    // VE doesn't have the following instructions.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand);
  }

  /// } Atomic instructions

  /// SJLJ instructions {
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  /// } SJLJ instructions

  // Intrinsic instructions
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
}

void VETargetLowering::initVPUActions() {
  for (MVT LegalMaskVT : AllMaskVTs)
    setOperationAction(ISD::BUILD_VECTOR, LegalMaskVT, Custom);

  for (unsigned Opc : {ISD::AND, ISD::OR, ISD::XOR})
    setOperationAction(Opc, MVT::v512i1, Custom);

  for (MVT LegalVecVT : AllVectorVTs) {
    setOperationAction(ISD::BUILD_VECTOR, LegalVecVT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, LegalVecVT, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, LegalVecVT, Legal);
    // Translate all vector instructions with legal element types to VVP_*
    // nodes.
    // TODO We will custom-widen into VVP_* nodes in the future. While we are
    // building the infrastructure for this, we only do this for legal vector
    // VTs.
#define HANDLE_VP_TO_VVP(VP_OPC, VVP_NAME)                                     \
  setOperationAction(ISD::VP_OPC, LegalVecVT, Custom);
#define ADD_VVP_OP(VVP_NAME, ISD_NAME)                                         \
  setOperationAction(ISD::ISD_NAME, LegalVecVT, Custom);
    setOperationAction(ISD::EXPERIMENTAL_VP_STRIDED_LOAD, LegalVecVT, Custom);
    setOperationAction(ISD::EXPERIMENTAL_VP_STRIDED_STORE, LegalVecVT, Custom);
#include "VVPNodes.def"
  }
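
  // With the mappings above, a plain vector op on a legal type (e.g. ISD::ADD
  // on v256i64) reaches LowerOperation and is rewritten into a VVP_ADD node
  // that carries an explicit mask and vector length (AVL) operand.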

  for (MVT LegalPackedVT : AllPackedVTs) {
    setOperationAction(ISD::INSERT_VECTOR_ELT, LegalPackedVT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, LegalPackedVT, Custom);
  }

  // vNt32, vNt64 ops (legal element types)
  for (MVT VT : MVT::vector_valuetypes()) {
    MVT ElemVT = VT.getVectorElementType();
    unsigned ElemBits = ElemVT.getScalarSizeInBits();
    if (ElemBits != 32 && ElemBits != 64)
      continue;

    for (unsigned MemOpc : {ISD::MLOAD, ISD::MSTORE, ISD::LOAD, ISD::STORE})
      setOperationAction(MemOpc, VT, Custom);

    const ISD::NodeType IntReductionOCs[] = {
        ISD::VECREDUCE_ADD,  ISD::VECREDUCE_MUL,  ISD::VECREDUCE_AND,
        ISD::VECREDUCE_OR,   ISD::VECREDUCE_XOR,  ISD::VECREDUCE_SMIN,
        ISD::VECREDUCE_SMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_UMAX};

    for (unsigned IntRedOpc : IntReductionOCs)
      setOperationAction(IntRedOpc, VT, Custom);
  }

  // v256i1 and v512i1 ops
  for (MVT MaskVT : AllMaskVTs) {
    // Custom lower mask ops
    setOperationAction(ISD::STORE, MaskVT, Custom);
    setOperationAction(ISD::LOAD, MaskVT, Custom);
  }
}

SDValue
VETargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                              bool IsVarArg,
                              const SmallVectorImpl<ISD::OutputArg> &Outs,
                              const SmallVectorImpl<SDValue> &OutVals,
                              const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, getReturnCC(CallConv));

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    assert(!VA.needsCustom() && "Unexpected custom lowering");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::BCvt: {
      // Convert a float return value to i64 with padding.
      //     63     31   0
      //    +------+------+
      //    | float|   0  |
      //    +------+------+
      assert(VA.getLocVT() == MVT::i64);
      assert(VA.getValVT() == MVT::f32);
      SDValue Undef = SDValue(
          DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i64), 0);
      SDValue Sub_f32 = DAG.getTargetConstant(VE::sub_f32, DL, MVT::i32);
      OutVal = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                          MVT::i64, Undef, OutVal, Sub_f32),
                       0);
      break;
    }
    default:
      llvm_unreachable("Unknown loc info!");
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(VEISD::RET_GLUE, DL, MVT::Other, RetOps);
}

SDValue VETargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Get the base offset of the incoming arguments stack space.
  unsigned ArgsBaseOffset = Subtarget->getRsaSize();
  // Get the size of the preserved arguments area.
  unsigned ArgsPreserved = 64;
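  // (The 64 bytes correspond to the eight 8-byte parameter-register slots
  // that the ABI reserves on the stack even when the arguments arrive in
  // registers; LowerCall below computes the same value as 8 * 8u.)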

  // Analyze arguments according to CC_VE.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  // Allocate the preserved area first.
  CCInfo.AllocateStack(ArgsPreserved, Align(8));
  // We already allocated the preserved area, so the stack offset computed
  // by CC_VE would be correct now.
  CCInfo.AnalyzeFormalArguments(Ins, getParamCC(CallConv, false));

  for (const CCValAssign &VA : ArgLocs) {
    assert(!VA.needsCustom() && "Unexpected custom lowering");
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      Register VReg =
          MF.addLiveIn(VA.getLocReg(), getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::BCvt: {
        // Extract a float argument from i64 with padding.
        //     63     31   0
        //    +------+------+
        //    | float|   0  |
        //    +------+------+
        assert(VA.getLocVT() == MVT::i64);
        assert(VA.getValVT() == MVT::f32);
        SDValue Sub_f32 = DAG.getTargetConstant(VE::sub_f32, DL, MVT::i32);
        Arg = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                         MVT::f32, Arg, Sub_f32),
                      0);
        break;
      }
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_VE_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp + the size of reserved area.
    unsigned Offset = VA.getLocMemOffset() + ArgsBaseOffset;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;

    // Adjust offset for a float argument by adding 4 since the argument is
    // stored in an 8-byte buffer with an offset like below. LLVM generates
    // a 4-byte load instruction, so we need to adjust the offset here. This
    // adjustment is required only in LowerFormalArguments. In LowerCall,
    // a float argument is converted to i64 first and stored as 8 bytes of
    // data, which is required by the ABI, so no adjustment is needed there.
    //    0      4
    //    +------+------+
    //    | empty| float|
    //    +------+------+
    if (VA.getValVT() == MVT::f32)
      Offset += 4;

    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
                    DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
                    MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %s0-%s8.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  // TODO: need to calculate offset correctly once we support f128.
  unsigned ArgOffset = ArgLocs.size() * 8;
  VEMachineFunctionInfo *FuncInfo = MF.getInfo<VEMachineFunctionInfo>();
  // Skip the reserved area at the top of stack.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgsBaseOffset);

  return Chain;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register VETargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                             const MachineFunction &MF) const {
  Register Reg = StringSwitch<Register>(RegName)
                     .Case("sp", VE::SX11)    // Stack pointer
                     .Case("fp", VE::SX9)     // Frame pointer
                     .Case("sl", VE::SX8)     // Stack limit
                     .Case("lr", VE::SX10)    // Link register
                     .Case("tp", VE::SX14)    // Thread pointer
                     .Case("outer", VE::SX12) // Outer register
                     .Case("info", VE::SX17)  // Info area register
                     .Case("got", VE::SX15)   // Global offset table register
                     .Case("plt", VE::SX16)   // Procedure linkage table register
                     .Default(0);

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

SDValue VETargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                    SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // VE target does not yet support tail call optimization.
  CLI.IsTailCall = false;

  // Get the base offset of the outgoing arguments stack space.
  unsigned ArgsBaseOffset = Subtarget->getRsaSize();
  // Get the size of the preserved arguments area.
  unsigned ArgsPreserved = 8 * 8u;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  // Allocate the preserved area first.
  CCInfo.AllocateStack(ArgsPreserved, Align(8));
  // We already allocated the preserved area, so the stack offset computed
  // by CC_VE would be correct now.
  CCInfo.AnalyzeCallOperands(CLI.Outs, getParamCC(CLI.CallConv, false));

  // VE requires using both registers and the stack for varargs and
  // unprototyped functions.
  bool UseBoth = CLI.IsVarArg;

  // Analyze operands again if it is required to store BOTH.
  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
                  ArgLocs2, *DAG.getContext());
  if (UseBoth)
    CCInfo2.AnalyzeCallOperands(CLI.Outs, getParamCC(CLI.CallConv, true));

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getStackSize();

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  // VE needs to get the address of the callee function in a register,
  // so prepare to copy it to SX12 here.

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;

  bool IsPICCall = isPositionIndependent();

  // PC-relative references to external symbols should go through $stub.
  // If so, we need to prepare GlobalBaseReg first.
  const TargetMachine &TM = DAG.getTarget();
  const GlobalValue *GV = nullptr;
  auto *CalleeG = dyn_cast<GlobalAddressSDNode>(Callee);
  if (CalleeG)
    GV = CalleeG->getGlobal();
  bool Local = TM.shouldAssumeDSOLocal(GV);
  bool UsePlt = !Local;
  MachineFunction &MF = DAG.getMachineFunction();

  // Turn GlobalAddress/ExternalSymbol node into a value node
  // containing the address of them here.
  if (CalleeG) {
    if (IsPICCall) {
      if (UsePlt)
        Subtarget->getInstrInfo()->getGlobalBaseReg(&MF);
      Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
      Callee = DAG.getNode(VEISD::GETFUNPLT, DL, PtrVT, Callee);
    } else {
      Callee =
          makeHiLoPair(Callee, VEMCExpr::VK_VE_HI32, VEMCExpr::VK_VE_LO32, DAG);
    }
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    if (IsPICCall) {
      if (UsePlt)
        Subtarget->getInstrInfo()->getGlobalBaseReg(&MF);
      Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
      Callee = DAG.getNode(VEISD::GETFUNPLT, DL, PtrVT, Callee);
    } else {
      Callee =
          makeHiLoPair(Callee, VEMCExpr::VK_VE_HI32, VEMCExpr::VK_VE_LO32, DAG);
    }
  }

  RegsToPass.push_back(std::make_pair(VE::SX12, Callee));

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt: {
      // Convert a float argument to i64 with padding.
      //     63     31   0
      //    +------+------+
      //    | float|   0  |
      //    +------+------+
      assert(VA.getLocVT() == MVT::i64);
      assert(VA.getValVT() == MVT::f32);
      SDValue Undef = SDValue(
          DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i64), 0);
      SDValue Sub_f32 = DAG.getTargetConstant(VE::sub_f32, DL, MVT::i32);
      Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                       MVT::i64, Undef, Arg, Sub_f32),
                    0);
      break;
    }
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      if (!UseBoth)
        continue;
      VA = ArgLocs2[i];
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(VE::SX11, PtrVT);
    // The argument area starts at %fp/%sp + the size of reserved area.
    SDValue PtrOff =
        DAG.getIntPtrConstant(VA.getLocMemOffset() + ArgsBaseOffset, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
                             RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const VERegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CLI.CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(VEISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, getReturnCC(CLI.CallConv));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(!VA.needsCustom() && "Unexpected custom lowering");
    Register Reg = VA.getLocReg();

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::BCvt: {
      // Extract a float return value from i64 with padding.
      //     63     31   0
      //    +------+------+
      //    | float|   0  |
      //    +------+------+
      assert(VA.getLocVT() == MVT::i64);
      assert(VA.getValVT() == MVT::f32);
      SDValue Sub_f32 = DAG.getTargetConstant(VE::sub_f32, DL, MVT::i32);
      RV = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                      MVT::f32, RV, Sub_f32),
                   0);
      break;
    }
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}

bool VETargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // VE uses 64-bit addressing, so we need multiple instructions to generate
  // an address. Folding an address with an offset increases the number of
  // instructions, so we disable it here. Offsets will be folded later in
  // the DAG combine if it is worth doing so.
  return false;
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool VETargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                    bool ForCodeSize) const {
  return VT == MVT::f32 || VT == MVT::f64;
}

/// Determine if the target supports unaligned memory accesses.
///
/// This function returns true if the target allows unaligned memory accesses
/// of the specified type in the given address space. If true, it also returns
/// whether the unaligned memory access is "fast" in the last argument by
/// reference. This is used, for example, in situations where an array
/// copy/move/set is converted to a sequence of store operations. Its use
/// helps to ensure that such replacements don't generate code that causes an
/// alignment error (trap) on the target machine.
bool VETargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      Align A,
                                                      MachineMemOperand::Flags,
                                                      unsigned *Fast) const {
  if (Fast) {
    // It's fast anytime on VE.
    *Fast = 1;
  }
  return true;
}

VETargetLowering::VETargetLowering(const TargetMachine &TM,
                                   const VESubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  initRegisterClasses();
  initSPUActions();
  initVPUActions();

  setStackPointerRegisterToSaveRestore(VE::SX11);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);

  // Set function alignment to 16 bytes.
  setMinFunctionAlignment(Align(16));

  // VE stores all arguments with 8-byte alignment.
  setMinStackArgumentAlignment(Align(8));

  computeRegisterProperties(Subtarget->getRegisterInfo());
}

const char *VETargetLowering::getTargetNodeName(unsigned Opcode) const {
#define TARGET_NODE_CASE(NAME)                                                 \
  case VEISD::NAME:                                                            \
    return "VEISD::" #NAME;
  switch ((VEISD::NodeType)Opcode) {
  case VEISD::FIRST_NUMBER:
    break;
    TARGET_NODE_CASE(CMPI)
    TARGET_NODE_CASE(CMPU)
    TARGET_NODE_CASE(CMPF)
    TARGET_NODE_CASE(CMPQ)
    TARGET_NODE_CASE(CMOV)
    TARGET_NODE_CASE(CALL)
    TARGET_NODE_CASE(EH_SJLJ_LONGJMP)
    TARGET_NODE_CASE(EH_SJLJ_SETJMP)
    TARGET_NODE_CASE(EH_SJLJ_SETUP_DISPATCH)
    TARGET_NODE_CASE(GETFUNPLT)
    TARGET_NODE_CASE(GETSTACKTOP)
    TARGET_NODE_CASE(GETTLSADDR)
    TARGET_NODE_CASE(GLOBAL_BASE_REG)
    TARGET_NODE_CASE(Hi)
    TARGET_NODE_CASE(Lo)
    TARGET_NODE_CASE(RET_GLUE)
    TARGET_NODE_CASE(TS1AM)
    TARGET_NODE_CASE(VEC_UNPACK_LO)
    TARGET_NODE_CASE(VEC_UNPACK_HI)
    TARGET_NODE_CASE(VEC_PACK)
    TARGET_NODE_CASE(VEC_BROADCAST)
    TARGET_NODE_CASE(REPL_I32)
    TARGET_NODE_CASE(REPL_F32)

    TARGET_NODE_CASE(LEGALAVL)

    // Register the VVP_* SDNodes.
#define ADD_VVP_OP(VVP_NAME, ...) TARGET_NODE_CASE(VVP_NAME)
#include "VVPNodes.def"
  }
#undef TARGET_NODE_CASE
  return nullptr;
}

EVT VETargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                         EVT VT) const {
  return MVT::i32;
}

// Convert to a target node and set target flags.
SDValue VETargetLowering::withTargetFlags(SDValue Op, unsigned TF,
                                          SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(GA),
                                      GA->getValueType(0), GA->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(), Op.getValueType(),
                                     0, TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
                                     CP->getAlign(), CP->getOffset(), TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(), ES->getValueType(0),
                                       TF);

  if (const JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op))
    return DAG.getTargetJumpTable(JT->getIndex(), JT->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}

// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
SDValue VETargetLowering::makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF,
                                       SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Hi = DAG.getNode(VEISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  SDValue Lo = DAG.getNode(VEISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}
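// For example, with the absolute-address flags this pair is selected to the
// usual VE 64-bit address materialization sequence (assembly sketch, per the
// comments in makeAddress below):
//     lea %reg, sym@lo
//     and %reg, %reg, (32)0
//     lea.sl %reg, sym@hi(, %reg)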

// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
SDValue VETargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = Op.getValueType();

  // Handle PIC mode first. VE needs a got load for every variable!
  if (isPositionIndependent()) {
    auto GlobalN = dyn_cast<GlobalAddressSDNode>(Op);

    if (isa<ConstantPoolSDNode>(Op) || isa<JumpTableSDNode>(Op) ||
        (GlobalN && GlobalN->getGlobal()->hasLocalLinkage())) {
      // Create following instructions for local linkage PIC code.
      //     lea %reg, label@gotoff_lo
      //     and %reg, %reg, (32)0
      //     lea.sl %reg, label@gotoff_hi(%reg, %got)
      SDValue HiLo = makeHiLoPair(Op, VEMCExpr::VK_VE_GOTOFF_HI32,
                                  VEMCExpr::VK_VE_GOTOFF_LO32, DAG);
      SDValue GlobalBase = DAG.getNode(VEISD::GLOBAL_BASE_REG, DL, PtrVT);
      return DAG.getNode(ISD::ADD, DL, PtrVT, GlobalBase, HiLo);
    }
    // Create following instructions for not local linkage PIC code.
    //     lea %reg, label@got_lo
    //     and %reg, %reg, (32)0
    //     lea.sl %reg, label@got_hi(%reg)
    //     ld %reg, (%reg, %got)
    SDValue HiLo = makeHiLoPair(Op, VEMCExpr::VK_VE_GOT_HI32,
                                VEMCExpr::VK_VE_GOT_LO32, DAG);
    SDValue GlobalBase = DAG.getNode(VEISD::GLOBAL_BASE_REG, DL, PtrVT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, GlobalBase, HiLo);
    return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), AbsAddr,
                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // This is one of the absolute code models.
  switch (getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
  case CodeModel::Medium:
  case CodeModel::Large:
    // abs64.
    return makeHiLoPair(Op, VEMCExpr::VK_VE_HI32, VEMCExpr::VK_VE_LO32, DAG);
  }
}

/// Custom Lower {

// The mappings for emitLeading/TrailingFence for VE are designed by following
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *VETargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                Instruction *Inst,
                                                AtomicOrdering Ord) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/non-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Acquire:
    return nullptr; // Nothing to do
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    return Builder.CreateFence(AtomicOrdering::Release);
  case AtomicOrdering::SequentiallyConsistent:
    if (!Inst->hasAtomicStore())
      return nullptr; // Nothing to do
    return Builder.CreateFence(AtomicOrdering::SequentiallyConsistent);
  }
  llvm_unreachable("Unknown fence ordering in emitLeadingFence");
}

Instruction *VETargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/not-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Release:
    return nullptr; // Nothing to do
  case AtomicOrdering::Acquire:
  case AtomicOrdering::AcquireRelease:
    return Builder.CreateFence(AtomicOrdering::Acquire);
  case AtomicOrdering::SequentiallyConsistent:
    return Builder.CreateFence(AtomicOrdering::SequentiallyConsistent);
  }
  llvm_unreachable("Unknown fence ordering in emitTrailingFence");
}

SDValue VETargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  AtomicOrdering FenceOrdering =
      static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
  SyncScope::ID FenceSSID =
      static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));

  // VE uses Release consistency, so we need a fence instruction if this is a
  // cross-thread fence.
  if (FenceSSID == SyncScope::System) {
    switch (FenceOrdering) {
    case AtomicOrdering::NotAtomic:
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
      // No need to generate fencem instruction here.
      break;
    case AtomicOrdering::Acquire:
      // Generate "fencem 2" as acquire fence.
      return SDValue(DAG.getMachineNode(VE::FENCEM, DL, MVT::Other,
                                        DAG.getTargetConstant(2, DL, MVT::i32),
                                        Op.getOperand(0)),
                     0);
    case AtomicOrdering::Release:
      // Generate "fencem 1" as release fence.
      return SDValue(DAG.getMachineNode(VE::FENCEM, DL, MVT::Other,
                                        DAG.getTargetConstant(1, DL, MVT::i32),
                                        Op.getOperand(0)),
                     0);
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::SequentiallyConsistent:
      // Generate "fencem 3" as acq_rel and seq_cst fence.
      // FIXME: "fencem 3" doesn't wait for PCIe device accesses,
      // so seq_cst may require more instructions for them.
      return SDValue(DAG.getMachineNode(VE::FENCEM, DL, MVT::Other,
                                        DAG.getTargetConstant(3, DL, MVT::i32),
                                        Op.getOperand(0)),
                     0);
    }
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

TargetLowering::AtomicExpansionKind
VETargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have a TS1AM implementation for i8/i16/i32/i64, so use it.
  if (AI->getOperation() == AtomicRMWInst::Xchg) {
    return AtomicExpansionKind::None;
  }
  // FIXME: Support "ATMAM" instruction for LOAD_ADD/SUB/AND/OR.

  // Otherwise, expand it using compare and exchange instructions to avoid
  // calling __sync_fetch_and_* functions.
  return AtomicExpansionKind::CmpXChg;
}

static SDValue prepareTS1AM(SDValue Op, SelectionDAG &DAG, SDValue &Flag,
                            SDValue &Bits) {
  SDLoc DL(Op);
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  SDValue Ptr = N->getOperand(1);
  SDValue Val = N->getOperand(2);
  EVT PtrVT = Ptr.getValueType();
  bool Byte = N->getMemoryVT() == MVT::i8;
  //   Remainder = AND Ptr, 3
  //   Flag = 1 << Remainder  ; If Byte is true (1 byte swap flag)
  //   Flag = 3 << Remainder  ; If Byte is false (2 bytes swap flag)
  //   Bits = Remainder << 3
  //   NewVal = Val << Bits
  SDValue Const3 = DAG.getConstant(3, DL, PtrVT);
  SDValue Remainder = DAG.getNode(ISD::AND, DL, PtrVT, {Ptr, Const3});
  SDValue Mask = Byte ? DAG.getConstant(1, DL, MVT::i32)
                      : DAG.getConstant(3, DL, MVT::i32);
  Flag = DAG.getNode(ISD::SHL, DL, MVT::i32, {Mask, Remainder});
  Bits = DAG.getNode(ISD::SHL, DL, PtrVT, {Remainder, Const3});
  return DAG.getNode(ISD::SHL, DL, Val.getValueType(), {Val, Bits});
}
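// Worked example (illustrative): for an i16 swap at Ptr = 0x1006,
// Remainder = 2, Flag = 3 << 2 = 0xc (selects bytes 2-3), Bits = 2 << 3 = 16,
// and NewVal = Val << 16, placing the value in the upper half of the aligned
// 4-byte word that TS1AM updates.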

static SDValue finalizeTS1AM(SDValue Op, SelectionDAG &DAG, SDValue Data,
                             SDValue Bits) {
  SDLoc DL(Op);
  EVT VT = Data.getValueType();
  bool Byte = cast<AtomicSDNode>(Op)->getMemoryVT() == MVT::i8;
  //   NewData = Data >> Bits
  //   Result = NewData & 0xff   ; If Byte is true (1 byte)
  //   Result = NewData & 0xffff ; If Byte is false (2 bytes)

  SDValue NewData = DAG.getNode(ISD::SRL, DL, VT, Data, Bits);
  return DAG.getNode(ISD::AND, DL, VT,
                     {NewData, DAG.getConstant(Byte ? 0xff : 0xffff, DL, VT)});
}
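// Continuing the example above: finalizeTS1AM shifts the old memory word
// right by Bits = 16 and masks with 0xffff to recover the previous i16 value.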

SDValue VETargetLowering::lowerATOMIC_SWAP(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  AtomicSDNode *N = cast<AtomicSDNode>(Op);

  if (N->getMemoryVT() == MVT::i8) {
    // For i8, use "ts1am".
    //   Input:
    //     ATOMIC_SWAP Ptr, Val, Order
    //
    //   Output:
    //     Remainder = AND Ptr, 3
    //     Flag = 1 << Remainder  ; 1 byte swap flag for TS1AM inst.
    //     Bits = Remainder << 3
    //     NewVal = Val << Bits
    //
    //     Aligned = AND Ptr, -4
    //     Data = TS1AM Aligned, Flag, NewVal
    //
    //     NewData = Data >> Bits
    //     Result = NewData & 0xff ; 1 byte result
    SDValue Flag;
    SDValue Bits;
    SDValue NewVal = prepareTS1AM(Op, DAG, Flag, Bits);

    SDValue Ptr = N->getOperand(1);
    SDValue Aligned = DAG.getNode(ISD::AND, DL, Ptr.getValueType(),
                                  {Ptr, DAG.getConstant(-4, DL, MVT::i64)});
    SDValue TS1AM = DAG.getAtomic(VEISD::TS1AM, DL, N->getMemoryVT(),
                                  DAG.getVTList(Op.getNode()->getValueType(0),
                                                Op.getNode()->getValueType(1)),
                                  {N->getChain(), Aligned, Flag, NewVal},
                                  N->getMemOperand());

    SDValue Result = finalizeTS1AM(Op, DAG, TS1AM, Bits);
    SDValue Chain = TS1AM.getValue(1);
    return DAG.getMergeValues({Result, Chain}, DL);
  }
  if (N->getMemoryVT() == MVT::i16) {
    // For i16, use "ts1am".
    SDValue Flag;
    SDValue Bits;
    SDValue NewVal = prepareTS1AM(Op, DAG, Flag, Bits);

    SDValue Ptr = N->getOperand(1);
    SDValue Aligned = DAG.getNode(ISD::AND, DL, Ptr.getValueType(),
                                  {Ptr, DAG.getConstant(-4, DL, MVT::i64)});
    SDValue TS1AM = DAG.getAtomic(VEISD::TS1AM, DL, N->getMemoryVT(),
                                  DAG.getVTList(Op.getNode()->getValueType(0),
                                                Op.getNode()->getValueType(1)),
                                  {N->getChain(), Aligned, Flag, NewVal},
                                  N->getMemOperand());

    SDValue Result = finalizeTS1AM(Op, DAG, TS1AM, Bits);
    SDValue Chain = TS1AM.getValue(1);
    return DAG.getMergeValues({Result, Chain}, DL);
  }
  // Otherwise, let llvm legalize it.
  return Op;
}

SDValue VETargetLowering::lowerGlobalAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue VETargetLowering::lowerBlockAddress(SDValue Op,
                                            SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue VETargetLowering::lowerConstantPool(SDValue Op,
                                            SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue
VETargetLowering::lowerToTLSGeneralDynamicModel(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);

  // Generate the following code:
  //   t1: ch,glue = callseq_start t0, 0, 0
  //   t2: i64,ch,glue = VEISD::GETTLSADDR t1, label, t1:1
  //   t3: ch,glue = callseq_end t2, 0, 0, t2:2
  //   t4: i64,ch,glue = CopyFromReg t3, Register:i64 $sx0, t3:1
  SDValue Label = withTargetFlags(Op, 0, DAG);
  EVT PtrVT = Op.getValueType();

  // Lowering the machine isd will make sure everything is in the right
  // location.
  SDValue Chain = DAG.getEntryNode();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
      DAG.getMachineFunction(), CallingConv::C);
  Chain = DAG.getCALLSEQ_START(Chain, 64, 0, DL);
  SDValue Args[] = {Chain, Label, DAG.getRegisterMask(Mask), Chain.getValue(1)};
  Chain = DAG.getNode(VEISD::GETTLSADDR, DL, NodeTys, Args);
  Chain = DAG.getCALLSEQ_END(Chain, 64, 0, Chain.getValue(1), DL);
  Chain = DAG.getCopyFromReg(Chain, DL, VE::SX0, PtrVT, Chain.getValue(1));

  // GETTLSADDR will be codegen'ed as a call. Inform MFI that this function
  // has calls.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setHasCalls(true);

  // Also generate code to prepare a GOT register if it is PIC.
  if (isPositionIndependent()) {
    MachineFunction &MF = DAG.getMachineFunction();
    Subtarget->getInstrInfo()->getGlobalBaseReg(&MF);
  }

  return Chain;
}

SDValue VETargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  // The current implementation of nld (2.26) doesn't allow local exec model
  // code described in VE-tls_v1.1.pdf (*1) as its input. Instead, we always
  // generate the general dynamic model code sequence.
  //
  // *1: https://www.nec.com/en/global/prod/hpc/aurora/document/VE-tls_v1.1.pdf
  return lowerToTLSGeneralDynamicModel(Op, DAG);
}

SDValue VETargetLowering::lowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

// Lower a f128 load into two f64 loads.
static SDValue lowerLoadF128(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
  assert(LdNode && LdNode->getOffset().isUndef() && "Unexpected node type");
  Align Alignment = LdNode->getAlign();
  if (Alignment > 8)
    Alignment = Align(8);

  SDValue Lo64 =
      DAG.getLoad(MVT::f64, DL, LdNode->getChain(), LdNode->getBasePtr(),
                  LdNode->getPointerInfo(), Alignment,
                  LdNode->isVolatile() ? MachineMemOperand::MOVolatile
                                       : MachineMemOperand::MONone);
  EVT AddrVT = LdNode->getBasePtr().getValueType();
  SDValue HiPtr = DAG.getNode(ISD::ADD, DL, AddrVT, LdNode->getBasePtr(),
                              DAG.getConstant(8, DL, AddrVT));
  SDValue Hi64 =
      DAG.getLoad(MVT::f64, DL, LdNode->getChain(), HiPtr,
                  LdNode->getPointerInfo(), Alignment,
                  LdNode->isVolatile() ? MachineMemOperand::MOVolatile
                                       : MachineMemOperand::MONone);

  SDValue SubRegEven = DAG.getTargetConstant(VE::sub_even, DL, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(VE::sub_odd, DL, MVT::i32);

  // VE stores Hi64 to 8(addr) and Lo64 to 0(addr).
  SDNode *InFP128 =
      DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f128);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f128,
                               SDValue(InFP128, 0), Hi64, SubRegEven);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f128,
                               SDValue(InFP128, 0), Lo64, SubRegOdd);
  SDValue OutChains[2] = {SDValue(Lo64.getNode(), 1),
                          SDValue(Hi64.getNode(), 1)};
  SDValue OutChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  SDValue Ops[2] = {SDValue(InFP128, 0), OutChain};
  return DAG.getMergeValues(Ops, DL);
}

// Lower a vXi1 load into the following instructions:
//   LDrii %1, (,%addr)
//   LVMxir %vm, 0, %1
//   LDrii %2, 8(,%addr)
//   LVMxir %vm, 0, %2
//   ...
static SDValue lowerLoadI1(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
  assert(LdNode && LdNode->getOffset().isUndef() && "Unexpected node type");

  SDValue BasePtr = LdNode->getBasePtr();
  Align Alignment = LdNode->getAlign();
  if (Alignment > 8)
    Alignment = Align(8);

  EVT AddrVT = BasePtr.getValueType();
  EVT MemVT = LdNode->getMemoryVT();
  if (MemVT == MVT::v256i1 || MemVT == MVT::v4i64) {
    SDValue OutChains[4];
    SDNode *VM = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MemVT);
    for (int i = 0; i < 4; ++i) {
      // Generate load dag and prepare chains.
      SDValue Addr = DAG.getNode(ISD::ADD, DL, AddrVT, BasePtr,
                                 DAG.getConstant(8 * i, DL, AddrVT));
      SDValue Val =
          DAG.getLoad(MVT::i64, DL, LdNode->getChain(), Addr,
                      LdNode->getPointerInfo(), Alignment,
                      LdNode->isVolatile() ? MachineMemOperand::MOVolatile
                                           : MachineMemOperand::MONone);
      OutChains[i] = SDValue(Val.getNode(), 1);

      VM = DAG.getMachineNode(VE::LVMir_m, DL, MVT::i64,
                              DAG.getTargetConstant(i, DL, MVT::i64), Val,
                              SDValue(VM, 0));
    }
    SDValue OutChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
    SDValue Ops[2] = {SDValue(VM, 0), OutChain};
    return DAG.getMergeValues(Ops, DL);
  } else if (MemVT == MVT::v512i1 || MemVT == MVT::v8i64) {
    SDValue OutChains[8];
    SDNode *VM = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MemVT);
    for (int i = 0; i < 8; ++i) {
      // Generate load dag and prepare chains.
      SDValue Addr = DAG.getNode(ISD::ADD, DL, AddrVT, BasePtr,
                                 DAG.getConstant(8 * i, DL, AddrVT));
      SDValue Val =
          DAG.getLoad(MVT::i64, DL, LdNode->getChain(), Addr,
                      LdNode->getPointerInfo(), Alignment,
                      LdNode->isVolatile() ? MachineMemOperand::MOVolatile
                                           : MachineMemOperand::MONone);
      OutChains[i] = SDValue(Val.getNode(), 1);

      VM = DAG.getMachineNode(VE::LVMyir_y, DL, MVT::i64,
                              DAG.getTargetConstant(i, DL, MVT::i64), Val,
                              SDValue(VM, 0));
    }
    SDValue OutChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
    SDValue Ops[2] = {SDValue(VM, 0), OutChain};
    return DAG.getMergeValues(Ops, DL);
  } else {
    // Otherwise, ask llvm to expand it.
    return SDValue();
  }
}

SDValue VETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
  EVT MemVT = LdNode->getMemoryVT();

  // If VPU is enabled, always expand non-mask vector loads to VVP.
  if (Subtarget->enableVPU() && MemVT.isVector() && !isMaskType(MemVT))
    return lowerToVVP(Op, DAG);

  SDValue BasePtr = LdNode->getBasePtr();
  if (isa<FrameIndexSDNode>(BasePtr.getNode())) {
    // Do not expand a load instruction with a frame index here because of
    // dependency problems. We expand it later in eliminateFrameIndex().
    return Op;
  }

  if (MemVT == MVT::f128)
    return lowerLoadF128(Op, DAG);
  if (isMaskType(MemVT))
    return lowerLoadI1(Op, DAG);

  return Op;
}

// Lower a f128 store into two f64 stores.
static SDValue lowerStoreF128(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
  assert(StNode && StNode->getOffset().isUndef() && "Unexpected node type");

  SDValue SubRegEven = DAG.getTargetConstant(VE::sub_even, DL, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(VE::sub_odd, DL, MVT::i32);

  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i64,
                                    StNode->getValue(), SubRegEven);
  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i64,
                                    StNode->getValue(), SubRegOdd);

  Align Alignment = StNode->getAlign();
  if (Alignment > 8)
    Alignment = Align(8);

  // VE stores Hi64 to 8(addr) and Lo64 to 0(addr).
  SDValue OutChains[2];
  OutChains[0] =
      DAG.getStore(StNode->getChain(), DL, SDValue(Lo64, 0),
                   StNode->getBasePtr(), MachinePointerInfo(), Alignment,
                   StNode->isVolatile() ? MachineMemOperand::MOVolatile
                                        : MachineMemOperand::MONone);
  EVT AddrVT = StNode->getBasePtr().getValueType();
  SDValue HiPtr = DAG.getNode(ISD::ADD, DL, AddrVT, StNode->getBasePtr(),
                              DAG.getConstant(8, DL, AddrVT));
  OutChains[1] =
      DAG.getStore(StNode->getChain(), DL, SDValue(Hi64, 0), HiPtr,
                   MachinePointerInfo(), Alignment,
                   StNode->isVolatile() ? MachineMemOperand::MOVolatile
                                        : MachineMemOperand::MONone);
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
}

// Lower a vXi1 store into the following instructions:
//   SVMi  %1, %vm, 0
//   STrii %1, (,%addr)
//   SVMi  %2, %vm, 1
//   STrii %2, 8(,%addr)
//   ...
static SDValue lowerStoreI1(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
  assert(StNode && StNode->getOffset().isUndef() && "Unexpected node type");

  SDValue BasePtr = StNode->getBasePtr();
  Align Alignment = StNode->getAlign();
  if (Alignment > 8)
    Alignment = Align(8);
  EVT AddrVT = BasePtr.getValueType();
  EVT MemVT = StNode->getMemoryVT();
  if (MemVT == MVT::v256i1 || MemVT == MVT::v4i64) {
    SDValue OutChains[4];
    for (int i = 0; i < 4; ++i) {
      SDNode *V =
          DAG.getMachineNode(VE::SVMmi, DL, MVT::i64, StNode->getValue(),
                             DAG.getTargetConstant(i, DL, MVT::i64));
      SDValue Addr = DAG.getNode(ISD::ADD, DL, AddrVT, BasePtr,
                                 DAG.getConstant(8 * i, DL, AddrVT));
      OutChains[i] =
          DAG.getStore(StNode->getChain(), DL, SDValue(V, 0), Addr,
                       MachinePointerInfo(), Alignment,
                       StNode->isVolatile() ? MachineMemOperand::MOVolatile
                                            : MachineMemOperand::MONone);
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  } else if (MemVT == MVT::v512i1 || MemVT == MVT::v8i64) {
    SDValue OutChains[8];
    for (int i = 0; i < 8; ++i) {
      SDNode *V =
          DAG.getMachineNode(VE::SVMyi, DL, MVT::i64, StNode->getValue(),
                             DAG.getTargetConstant(i, DL, MVT::i64));
      SDValue Addr = DAG.getNode(ISD::ADD, DL, AddrVT, BasePtr,
                                 DAG.getConstant(8 * i, DL, AddrVT));
      OutChains[i] =
          DAG.getStore(StNode->getChain(), DL, SDValue(V, 0), Addr,
                       MachinePointerInfo(), Alignment,
                       StNode->isVolatile() ? MachineMemOperand::MOVolatile
                                            : MachineMemOperand::MONone);
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  } else {
    // Otherwise, ask llvm to expand it.
    return SDValue();
  }
}

SDValue VETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
  assert(StNode && StNode->getOffset().isUndef() && "Unexpected node type");
  EVT MemVT = StNode->getMemoryVT();

  // If VPU is enabled, always expand non-mask vector stores to VVP.
  if (Subtarget->enableVPU() && MemVT.isVector() && !isMaskType(MemVT))
    return lowerToVVP(Op, DAG);

  SDValue BasePtr = StNode->getBasePtr();
  if (isa<FrameIndexSDNode>(BasePtr.getNode())) {
    // Do not expand a store instruction with a frame index here because of
    // dependency problems. We expand it later in eliminateFrameIndex().
    return Op;
  }

  if (MemVT == MVT::f128)
    return lowerStoreF128(Op, DAG);
  if (isMaskType(MemVT))
    return lowerStoreI1(Op, DAG);

  // Otherwise, ask llvm to expand it.
  return SDValue();
}

SDValue VETargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  VEMachineFunctionInfo *FuncInfo = MF.getInfo<VEMachineFunctionInfo>();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Need frame address to find the address of VarArgsFrameIndex.
  MF.getFrameInfo().setFrameAddressIsTaken(true);

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
      DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(VE::SX9, PtrVT),
                  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue VETargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
  SDValue Chain = VAList.getValue(1);
  SDValue NextPtr;

  if (VT == MVT::f128) {
    // VE f128 values must be stored with 16-byte alignment. We don't
    // know the actual alignment of VAList, so we align it dynamically.
    int Align = 16;
    VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                         DAG.getConstant(Align - 1, DL, PtrVT));
    VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList,
                         DAG.getConstant(-Align, DL, PtrVT));
    // Increment the pointer, VAList, by 16 to the next vaarg.
    NextPtr =
        DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getIntPtrConstant(16, DL));
  } else if (VT == MVT::f32) {
    // float --> need special handling like below.
    //    0      4
    //    +------+------+
    //    | empty| float|
    //    +------+------+
    // Increment the pointer, VAList, by 8 to the next vaarg.
    NextPtr =
        DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getIntPtrConstant(8, DL));
    // Then, adjust VAList.
    unsigned InternalOffset = 4;
    VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                         DAG.getConstant(InternalOffset, DL, PtrVT));
  } else {
    // Increment the pointer, VAList, by 8 to the next vaarg.
    NextPtr =
        DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getIntPtrConstant(8, DL));
  }

  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(Chain, DL, NextPtr, VAListPtr, MachinePointerInfo(SV));

  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(
      VT, DL, InChain, VAList, MachinePointerInfo(),
      Align(std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8));
}
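// Note (illustrative): the f128 path above is the standard align-up idiom,
// VAList = (VAList + 15) & -16, followed by a 16-byte step to the next
// argument slot.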

SDValue VETargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Generate the following code:
  //   (void)__llvm_grow_stack(size);
  //   ret = GETSTACKTOP;        // pseudo instruction
  SDLoc DL(Op);

  // Get the inputs.
  SDNode *Node = Op.getNode();
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  MaybeAlign Alignment(Op.getConstantOperandVal(2));
  EVT VT = Node->getValueType(0);

  // Chain the dynamic stack allocation so that it doesn't modify the stack
  // pointer when other instructions are using the stack.
  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);

  const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
  Align StackAlign = TFI.getStackAlign();
  bool NeedsAlign = Alignment.valueOrOne() > StackAlign;

  // Prepare arguments.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Size;
  Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
  Args.push_back(Entry);
  if (NeedsAlign) {
    Entry.Node = DAG.getConstant(~(Alignment->value() - 1ULL), DL, VT);
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Args.push_back(Entry);
  }
  Type *RetTy = Type::getVoidTy(*DAG.getContext());

  EVT PtrVT = Op.getValueType();
  SDValue Callee;
  if (NeedsAlign) {
    Callee = DAG.getTargetExternalSymbol("__ve_grow_stack_align", PtrVT, 0);
  } else {
    Callee = DAG.getTargetExternalSymbol("__ve_grow_stack", PtrVT, 0);
  }

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(Chain)
      .setCallee(CallingConv::PreserveAll, RetTy, Callee, std::move(Args))
      .setDiscardResult(true);
  std::pair<SDValue, SDValue> pair = LowerCallTo(CLI);
  Chain = pair.second;
  SDValue Result = DAG.getNode(VEISD::GETSTACKTOP, DL, VT, Chain);
  if (NeedsAlign) {
    Result = DAG.getNode(ISD::ADD, DL, VT, Result,
                         DAG.getConstant((Alignment->value() - 1ULL), DL, VT));
    Result = DAG.getNode(ISD::AND, DL, VT, Result,
                         DAG.getConstant(~(Alignment->value() - 1ULL), DL, VT));
  }
  //  Chain = Result.getValue(1);
  Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), DL);

  SDValue Ops[2] = {Result, Chain};
  return DAG.getMergeValues(Ops, DL);
}
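// Example (sketch): an alloca with 32-byte alignment (greater than the
// 16-byte stack alignment) calls __ve_grow_stack_align(size, ~31ULL) and
// then rounds the returned stack top: Result = (Result + 31) & ~31.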

SDValue VETargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(VEISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1));
}

SDValue VETargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(VEISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
                     Op.getOperand(1));
}

SDValue VETargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(VEISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
                     Op.getOperand(0));
}

static SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
                              const VETargetLowering &TLI,
                              const VESubtarget *Subtarget) {
  SDLoc DL(Op);
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = TLI.getPointerTy(MF.getDataLayout());

  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  unsigned Depth = Op.getConstantOperandVal(0);
  const VERegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  Register FrameReg = RegInfo->getFrameRegister(MF);
  SDValue FrameAddr =
      DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), DL, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}

static SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
                               const VETargetLowering &TLI,
                               const VESubtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDValue FrameAddr = lowerFRAMEADDR(Op, DAG, TLI, Subtarget);

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Offset = DAG.getConstant(8, DL, VT);
  return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                     DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                     MachinePointerInfo());
}

SDValue VETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = Op.getConstantOperandVal(0);
  switch (IntNo) {
  default: // Don't custom lower most intrinsics.
    return SDValue();
  case Intrinsic::eh_sjlj_lsda: {
    MachineFunction &MF = DAG.getMachineFunction();
    MVT VT = Op.getSimpleValueType();
    const VETargetMachine *TM =
        static_cast<const VETargetMachine *>(&DAG.getTarget());

    // Create GCC_except_tableXX string. The real symbol for that will be
    // generated in EHStreamer::emitExceptionTable() later. So, we just
    // borrow its name here.
    TM->getStrList()->push_back(std::string(
        (Twine("GCC_except_table") + Twine(MF.getFunctionNumber())).str()));
    SDValue Addr =
        DAG.getTargetExternalSymbol(TM->getStrList()->back().c_str(), VT, 0);
    if (isPositionIndependent()) {
      Addr = makeHiLoPair(Addr, VEMCExpr::VK_VE_GOTOFF_HI32,
                          VEMCExpr::VK_VE_GOTOFF_LO32, DAG);
      SDValue GlobalBase = DAG.getNode(VEISD::GLOBAL_BASE_REG, DL, VT);
      return DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Addr);
    }
    return makeHiLoPair(Addr, VEMCExpr::VK_VE_HI32, VEMCExpr::VK_VE_LO32, DAG);
  }
  }
}

static bool getUniqueInsertion(SDNode *N, unsigned &UniqueIdx) {
  if (!isa<BuildVectorSDNode>(N))
    return false;
  const auto *BVN = cast<BuildVectorSDNode>(N);

  // Find first non-undef insertion.
  unsigned Idx;
  for (Idx = 0; Idx < BVN->getNumOperands(); ++Idx) {
    auto ElemV = BVN->getOperand(Idx);
    if (!ElemV->isUndef())
      break;
  }
  // Catch the (hypothetical) all-undef case.
  if (Idx == BVN->getNumOperands())
    return false;
  // Remember insertion.
  UniqueIdx = Idx++;
  // Verify that all other insertions are undef.
  for (; Idx < BVN->getNumOperands(); ++Idx) {
    auto ElemV = BVN->getOperand(Idx);
    if (!ElemV->isUndef())
      return false;
  }
  return true;
}
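// Example: (build_vector undef, undef, %x, undef) sets UniqueIdx = 2 and
// returns true; any build_vector with two or more non-undef operands returns
// false.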
1820
1822 if (auto *BuildVec = dyn_cast<BuildVectorSDNode>(N)) {
1823 return BuildVec->getSplatValue();
1824 }
1825 return SDValue();
1826}
1827
1829 SelectionDAG &DAG) const {
1830 VECustomDAG CDAG(DAG, Op);
1831 MVT ResultVT = Op.getSimpleValueType();
1832
1833 // If there is just one element, expand to INSERT_VECTOR_ELT.
1834 unsigned UniqueIdx;
1835 if (getUniqueInsertion(Op.getNode(), UniqueIdx)) {
1836 SDValue AccuV = CDAG.getUNDEF(Op.getValueType());
1837 auto ElemV = Op->getOperand(UniqueIdx);
1838 SDValue IdxV = CDAG.getConstant(UniqueIdx, MVT::i64);
1839 return CDAG.getNode(ISD::INSERT_VECTOR_ELT, ResultVT, {AccuV, ElemV, IdxV});
1840 }
1841
1842 // Else emit a broadcast.
1843 if (SDValue ScalarV = getSplatValue(Op.getNode())) {
1844 unsigned NumEls = ResultVT.getVectorNumElements();
1845 auto AVL = CDAG.getConstant(NumEls, MVT::i32);
1846 return CDAG.getBroadcast(ResultVT, ScalarV, AVL);
1847 }
1848
1849 // Expand
1850 return SDValue();
1851}
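// [Editor's note] A summary of the three paths above, e.g. for v256i64:
//   one non-undef element X at index 2 -> INSERT_VECTOR_ELT(undef, X, 2)
//   splat of X                         -> broadcast of X with AVL = 256
//   anything else                      -> SDValue(), i.e. generic expansion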
1852
1853 TargetLowering::LegalizeAction
1854 VETargetLowering::getCustomOperationAction(SDNode &Op) const {
1855 // Custom legalization on VVP_* and VEC_* opcodes is required to
1856 // pack-legalize these operations (transform nodes such that their AVL
1857 // parameter refers to packs of 64 bits instead of the number of elements).
1858
1859 // Packing opcodes are created with a pack-legal AVL (LEGALAVL). No need to
1860 // re-visit them.
1861 if (isPackingSupportOpcode(Op.getOpcode()))
1862 return Legal;
1863
1864 // Custom lower to legalize AVL for packed mode.
1865 if (isVVPOrVEC(Op.getOpcode()))
1866 return Custom;
1867 return Legal;
1868}
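// [Editor's note] For example, a VVP_* node whose AVL still counts elements
// is reported as Custom so LowerOperation can rewrite its AVL in terms of
// 64-bit packs, while packing-support nodes (already carrying a legalized
// AVL, see LEGALAVL above) need no further visit and stay Legal.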
1869
1870 SDValue VETargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
1871 LLVM_DEBUG(dbgs() << "::LowerOperation "; Op.dump(&DAG));
1872 unsigned Opcode = Op.getOpcode();
1873
1874 /// Scalar isel.
1875 switch (Opcode) {
1876 case ISD::ATOMIC_FENCE:
1877 return lowerATOMIC_FENCE(Op, DAG);
1878 case ISD::ATOMIC_SWAP:
1879 return lowerATOMIC_SWAP(Op, DAG);
1880 case ISD::BlockAddress:
1881 return lowerBlockAddress(Op, DAG);
1882 case ISD::ConstantPool:
1883 return lowerConstantPool(Op, DAG);
1884 case ISD::DYNAMIC_STACKALLOC:
1885 return lowerDYNAMIC_STACKALLOC(Op, DAG);
1886 case ISD::EH_SJLJ_LONGJMP:
1887 return lowerEH_SJLJ_LONGJMP(Op, DAG);
1888 case ISD::EH_SJLJ_SETJMP:
1889 return lowerEH_SJLJ_SETJMP(Op, DAG);
1890 case ISD::EH_SJLJ_SETUP_DISPATCH:
1891 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
1892 case ISD::FRAMEADDR:
1893 return lowerFRAMEADDR(Op, DAG, *this, Subtarget);
1894 case ISD::GlobalAddress:
1895 return lowerGlobalAddress(Op, DAG);
1896 case ISD::GlobalTLSAddress:
1897 return lowerGlobalTLSAddress(Op, DAG);
1898 case ISD::INTRINSIC_WO_CHAIN:
1899 return lowerINTRINSIC_WO_CHAIN(Op, DAG);
1900 case ISD::JumpTable:
1901 return lowerJumpTable(Op, DAG);
1902 case ISD::LOAD:
1903 return lowerLOAD(Op, DAG);
1904 case ISD::RETURNADDR:
1905 return lowerRETURNADDR(Op, DAG, *this, Subtarget);
1906 case ISD::BUILD_VECTOR:
1907 return lowerBUILD_VECTOR(Op, DAG);
1908 case ISD::STORE:
1909 return lowerSTORE(Op, DAG);
1910 case ISD::VASTART:
1911 return lowerVASTART(Op, DAG);
1912 case ISD::VAARG:
1913 return lowerVAARG(Op, DAG);
1914
1915 case ISD::INSERT_VECTOR_ELT:
1916 return lowerINSERT_VECTOR_ELT(Op, DAG);
1917 case ISD::EXTRACT_VECTOR_ELT:
1918 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
1919 }
1920
1921 /// Vector isel.
1922 if (ISD::isVPOpcode(Opcode))
1923 return lowerToVVP(Op, DAG);
1924
1925 switch (Opcode) {
1926 default:
1927 llvm_unreachable("Should not custom lower this!");
1928
1929 // Legalize the AVL of this internal node.
1930 case VEISD::VEC_BROADCAST:
1931#define ADD_VVP_OP(VVP_NAME, ...) case VEISD::VVP_NAME:
1932#include "VVPNodes.def"
1933 // AVL already legalized.
1934 if (getAnnotatedNodeAVL(Op).second)
1935 return Op;
1936 return legalizeInternalVectorOp(Op, DAG);
1937
1938 // Translate into a VEC_*/VVP_* layer operation.
1939 case ISD::MLOAD:
1940 case ISD::MSTORE:
1941#define ADD_VVP_OP(VVP_NAME, ISD_NAME) case ISD::ISD_NAME:
1942#include "VVPNodes.def"
1943 if (isMaskArithmetic(Op) && isPackedVectorType(Op.getValueType()))
1944 return splitMaskArithmetic(Op, DAG);
1945 return lowerToVVP(Op, DAG);
1946 }
1947}
1948/// } Custom Lower
1949
1950 void VETargetLowering::ReplaceNodeResults(SDNode *N,
1951 SmallVectorImpl<SDValue> &Results,
1952 SelectionDAG &DAG) const {
1953 switch (N->getOpcode()) {
1954 case ISD::ATOMIC_SWAP:
1955 // Let LLVM expand atomic swap instruction through LowerOperation.
1956 return;
1957 default:
1958 LLVM_DEBUG(N->dumpr(&DAG));
1959 llvm_unreachable("Do not know how to custom type legalize this operation!");
1960 }
1961}
1962
1963/// JumpTable for VE.
1964///
1965 /// VE cannot generate relocatable symbols in a jump table, i.e. it cannot
1966 /// generate expressions that use symbols from both the text segment and the
1967 /// data segment, like below.
1968 ///     .4byte    .LBB0_2-.LJTI0_0
1969 /// So, we instead generate an offset from the top of the function as a
1970 /// custom label, like below.
1971 ///     .4byte    .LBB0_2-<function name>
1972
1973 unsigned VETargetLowering::getJumpTableEncoding() const {
1974 // Use a custom label for PIC.
1975 if (isPositionIndependent())
1976 return MachineJumpTableInfo::EK_Custom32;
1977 
1978 // Otherwise, use the normal jump table encoding heuristics.
1979 return TargetLowering::getJumpTableEncoding();
1980 }
1981
1982 const MCExpr *VETargetLowering::LowerCustomJumpTableEntry(
1983 const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
1984 unsigned Uid, MCContext &Ctx) const {
1985 assert(isPositionIndependent());
1986 
1987 // Generate custom label for PIC like below.
1988 // .4bytes .LBB0_2-<function name>
1989 const auto *Value = MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
1990 MCSymbol *Sym = Ctx.getOrCreateSymbol(MBB->getParent()->getName());
1991 const auto *Base = MCSymbolRefExpr::create(Sym, Ctx);
1992 return MCBinaryExpr::createSub(Value, Base, Ctx);
1993}
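// [Editor's note] For basic block #2 of a function "fun", the expression
// built above emits the custom 32-bit entry
//   .4byte .LBB0_2-fun
// i.e. Value (the block's symbol) minus Base (a symbol named after the
// containing function), matching the scheme described before
// getJumpTableEncoding().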
1994
1995 SDValue VETargetLowering::getPICJumpTableRelocBase(SDValue Table,
1996 SelectionDAG &DAG) const {
1997 assert(isPositionIndependent());
1998 SDLoc DL(Table);
1999 Function *Function = &DAG.getMachineFunction().getFunction();
2000 assert(Function != nullptr);
2001 auto PtrTy = getPointerTy(DAG.getDataLayout(), Function->getAddressSpace());
2002
2003 // In the jump table, we have the following values in PIC mode.
2004 // .4bytes .LBB0_2-<function name>
2005 // We need to add this value and the address of this function to generate
2006 // the .LBB0_2 label correctly under PIC mode. So, we want to generate the
2007 // following instructions:
2008 // lea %reg, fun@gotoff_lo
2009 // and %reg, %reg, (32)0
2010 // lea.sl %reg, fun@gotoff_hi(%reg, %got)
2011 // In order to do so, we need to generate a correctly marked DAG node using
2012 // makeHiLoPair.
2013 SDValue Op = DAG.getGlobalAddress(Function, DL, PtrTy);
2014 SDValue HiLo = makeHiLoPair(Op, VEMCExpr::VK_VE_GOTOFF_HI32,
2015 VEMCExpr::VK_VE_GOTOFF_LO32, DAG);
2016 SDValue GlobalBase = DAG.getNode(VEISD::GLOBAL_BASE_REG, DL, PtrTy);
2017 return DAG.getNode(ISD::ADD, DL, PtrTy, GlobalBase, HiLo);
2018}
2019
2020 Register VETargetLowering::prepareMBB(MachineBasicBlock &MBB,
2021 MachineBasicBlock::iterator I,
2022 MachineBasicBlock *TargetBB,
2023 const DebugLoc &DL) const {
2024 MachineFunction *MF = MBB.getParent();
2025 MachineRegisterInfo &MRI = MF->getRegInfo();
2026 const VEInstrInfo *TII = Subtarget->getInstrInfo();
2027
2028 const TargetRegisterClass *RC = &VE::I64RegClass;
2029 Register Tmp1 = MRI.createVirtualRegister(RC);
2030 Register Tmp2 = MRI.createVirtualRegister(RC);
2031 Register Result = MRI.createVirtualRegister(RC);
2032
2033 if (isPositionIndependent()) {
2034 // Create the following instructions for local linkage PIC code.
2035 // lea %Tmp1, TargetBB@gotoff_lo
2036 // and %Tmp2, %Tmp1, (32)0
2037 // lea.sl %Result, TargetBB@gotoff_hi(%Tmp2, %s15) ; %s15 is GOT
2038 BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
2039 .addImm(0)
2040 .addImm(0)
2041 .addMBB(TargetBB, VEMCExpr::VK_VE_GOTOFF_LO32);
2042 BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
2043 .addReg(Tmp1, getKillRegState(true))
2044 .addImm(M0(32));
2045 BuildMI(MBB, I, DL, TII->get(VE::LEASLrri), Result)
2046 .addReg(VE::SX15)
2047 .addReg(Tmp2, getKillRegState(true))
2048 .addMBB(TargetBB, VEMCExpr::VK_VE_GOTOFF_HI32);
2049 } else {
2050 // Create the following instructions for non-PIC code.
2051 // lea %Tmp1, TargetBB@lo
2052 // and %Tmp2, %Tmp1, (32)0
2053 // lea.sl %Result, TargetBB@hi(%Tmp2)
2054 BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
2055 .addImm(0)
2056 .addImm(0)
2057 .addMBB(TargetBB, VEMCExpr::VK_VE_LO32);
2058 BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
2059 .addReg(Tmp1, getKillRegState(true))
2060 .addImm(M0(32));
2061 BuildMI(MBB, I, DL, TII->get(VE::LEASLrii), Result)
2062 .addReg(Tmp2, getKillRegState(true))
2063 .addImm(0)
2064 .addMBB(TargetBB, VEMCExpr::VK_VE_HI32);
2065 }
2066 return Result;
2067}
2068
2069 Register VETargetLowering::prepareSymbol(MachineBasicBlock &MBB,
2070 MachineBasicBlock::iterator I,
2071 StringRef Symbol, const DebugLoc &DL,
2072 bool IsLocal = false,
2073 bool IsCall = false) const {
2074 MachineFunction *MF = MBB.getParent();
2075 MachineRegisterInfo &MRI = MF->getRegInfo();
2076 const VEInstrInfo *TII = Subtarget->getInstrInfo();
2077
2078 const TargetRegisterClass *RC = &VE::I64RegClass;
2079 Register Result = MRI.createVirtualRegister(RC);
2080
2081 if (isPositionIndependent()) {
2082 if (IsCall && !IsLocal) {
2083 // Create the following instructions for non-local linkage PIC function
2084 // calls. These instructions use the IC and the magic number -24, so we
2085 // expand them from the GETFUNPLT pseudo instruction in VEAsmPrinter.cpp.
2086 // lea %Reg, Symbol@plt_lo(-24)
2087 // and %Reg, %Reg, (32)0
2088 // sic %s16
2089 // lea.sl %Result, Symbol@plt_hi(%Reg, %s16) ; %s16 is PLT
2090 BuildMI(MBB, I, DL, TII->get(VE::GETFUNPLT), Result)
2091 .addExternalSymbol("abort");
2092 } else if (IsLocal) {
2093 Register Tmp1 = MRI.createVirtualRegister(RC);
2094 Register Tmp2 = MRI.createVirtualRegister(RC);
2095 // Create the following instructions for local linkage PIC code.
2096 // lea %Tmp1, Symbol@gotoff_lo
2097 // and %Tmp2, %Tmp1, (32)0
2098 // lea.sl %Result, Symbol@gotoff_hi(%Tmp2, %s15) ; %s15 is GOT
2099 BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
2100 .addImm(0)
2101 .addImm(0)
2102 .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_GOTOFF_LO32);
2103 BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
2104 .addReg(Tmp1, getKillRegState(true))
2105 .addImm(M0(32));
2106 BuildMI(MBB, I, DL, TII->get(VE::LEASLrri), Result)
2107 .addReg(VE::SX15)
2108 .addReg(Tmp2, getKillRegState(true))
2109 .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_GOTOFF_HI32);
2110 } else {
2111 Register Tmp1 = MRI.createVirtualRegister(RC);
2112 Register Tmp2 = MRI.createVirtualRegister(RC);
2113 // Create the following instructions for non-local linkage PIC code.
2114 // lea %Tmp1, Symbol@got_lo
2115 // and %Tmp2, %Tmp1, (32)0
2116 // lea.sl %Tmp3, Symbol@got_hi(%Tmp2, %s15) ; %s15 is GOT
2117 // ld %Result, 0(%Tmp3)
2118 Register Tmp3 = MRI.createVirtualRegister(RC);
2119 BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
2120 .addImm(0)
2121 .addImm(0)
2122 .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_GOT_LO32);
2123 BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
2124 .addReg(Tmp1, getKillRegState(true))
2125 .addImm(M0(32));
2126 BuildMI(MBB, I, DL, TII->get(VE::LEASLrri), Tmp3)
2127 .addReg(VE::SX15)
2128 .addReg(Tmp2, getKillRegState(true))
2129 .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_GOT_HI32);
2130 BuildMI(MBB, I, DL, TII->get(VE::LDrii), Result)
2131 .addReg(Tmp3, getKillRegState(true))
2132 .addImm(0)
2133 .addImm(0);
2134 }
2135 } else {
2136 Register Tmp1 = MRI.createVirtualRegister(RC);
2137 Register Tmp2 = MRI.createVirtualRegister(RC);
2138 // Create the following instructions for non-PIC code.
2139 // lea %Tmp1, Symbol@lo
2140 // and %Tmp2, %Tmp1, (32)0
2141 // lea.sl %Result, Symbol@hi(%Tmp2)
2142 BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
2143 .addImm(0)
2144 .addImm(0)
2145 .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_LO32);
2146 BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
2147 .addReg(Tmp1, getKillRegState(true))
2148 .addImm(M0(32));
2149 BuildMI(MBB, I, DL, TII->get(VE::LEASLrii), Result)
2150 .addReg(Tmp2, getKillRegState(true))
2151 .addImm(0)
2152 .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_HI32);
2153 }
2154 return Result;
2155}
2156
2157 void VETargetLowering::setupEntryBlockForSjLj(MachineInstr &MI,
2158 MachineBasicBlock *MBB,
2159 MachineBasicBlock *DispatchBB,
2160 int FI, int Offset) const {
2161 DebugLoc DL = MI.getDebugLoc();
2162 const VEInstrInfo *TII = Subtarget->getInstrInfo();
2163
2164 Register LabelReg =
2165 prepareMBB(*MBB, MachineBasicBlock::iterator(MI), DispatchBB, DL);
2166 
2167 // Store the address of DispatchBB into jmpbuf[1], which holds the next IC
2168 // referenced by longjmp (throw) later.
2169 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(VE::STrii));
2170 addFrameReference(MIB, FI, Offset); // jmpbuf[1]
2171 MIB.addReg(LabelReg, getKillRegState(true));
2172}
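// [Editor's note] The jmpbuf slot layout implied by the SjLj routines here
// (inferred from the offsets used in this file, not a documented ABI):
//   buf[0] (+0):  saved FP          buf[1] (+8):  resume IC
//   buf[2] (+16): saved SP          buf[3] (+24): saved BP (%s17), iff used
// setupEntryBlockForSjLj() fills the "resume IC" slot with DispatchBB.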
2173
2174 MachineBasicBlock *
2175 VETargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
2176 MachineBasicBlock *MBB) const {
2177 DebugLoc DL = MI.getDebugLoc();
2178 MachineFunction *MF = MBB->getParent();
2179 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2180 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
2181 MachineRegisterInfo &MRI = MF->getRegInfo();
2182 
2183 const BasicBlock *BB = MBB->getBasicBlock();
2184 MachineFunction::iterator I = ++MBB->getIterator();
2185 
2186 // Memory Reference.
2187 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands());
2188 Register BufReg = MI.getOperand(1).getReg();
2189
2190 Register DstReg;
2191
2192 DstReg = MI.getOperand(0).getReg();
2193 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
2194 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
2195 (void)TRI;
2196 Register MainDestReg = MRI.createVirtualRegister(RC);
2197 Register RestoreDestReg = MRI.createVirtualRegister(RC);
2198
2199 // For `v = call @llvm.eh.sjlj.setjmp(buf)`, we generate the following
2200 // instructions. SP/FP must be saved in jmpbuf before `llvm.eh.sjlj.setjmp`.
2201 //
2202 // ThisMBB:
2203 // buf[3] = %s17 iff %s17 is used as BP
2204 // buf[1] = RestoreMBB as IC after longjmp
2205 // # SjLjSetup RestoreMBB
2206 //
2207 // MainMBB:
2208 // v_main = 0
2209 //
2210 // SinkMBB:
2211 // v = phi(v_main, MainMBB, v_restore, RestoreMBB)
2212 // ...
2213 //
2214 // RestoreMBB:
2215 // %s17 = buf[3] iff %s17 is used as BP
2216 // v_restore = 1
2217 // goto SinkMBB
2218
2219 MachineBasicBlock *ThisMBB = MBB;
2220 MachineBasicBlock *MainMBB = MF->CreateMachineBasicBlock(BB);
2221 MachineBasicBlock *SinkMBB = MF->CreateMachineBasicBlock(BB);
2222 MachineBasicBlock *RestoreMBB = MF->CreateMachineBasicBlock(BB);
2223 MF->insert(I, MainMBB);
2224 MF->insert(I, SinkMBB);
2225 MF->push_back(RestoreMBB);
2226 RestoreMBB->setMachineBlockAddressTaken();
2227
2228 // Transfer the remainder of BB and its successor edges to SinkMBB.
2229 SinkMBB->splice(SinkMBB->begin(), MBB,
2230 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
2231 SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
2232 
2233 // ThisMBB:
2234 Register LabelReg =
2235 prepareMBB(*MBB, MachineBasicBlock::iterator(MI), RestoreMBB, DL);
2236 
2237 // Store BP in buf[3] iff this function is using BP.
2238 const VEFrameLowering *TFI = Subtarget->getFrameLowering();
2239 if (TFI->hasBP(*MF)) {
2240 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(VE::STrii));
2241 MIB.addReg(BufReg);
2242 MIB.addImm(0);
2243 MIB.addImm(24);
2244 MIB.addReg(VE::SX17);
2245 MIB.setMemRefs(MMOs);
2246 }
2247
2248 // Store IP in buf[1].
2249 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(VE::STrii));
2250 MIB.add(MI.getOperand(1)); // we can preserve the kill flags here.
2251 MIB.addImm(0);
2252 MIB.addImm(8);
2253 MIB.addReg(LabelReg, getKillRegState(true));
2254 MIB.setMemRefs(MMOs);
2255
2256 // SP/FP are already stored in jmpbuf before `llvm.eh.sjlj.setjmp`.
2257
2258 // Insert setup.
2259 MIB =
2260 BuildMI(*ThisMBB, MI, DL, TII->get(VE::EH_SjLj_Setup)).addMBB(RestoreMBB);
2261
2262 const VERegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2263 MIB.addRegMask(RegInfo->getNoPreservedMask());
2264 ThisMBB->addSuccessor(MainMBB);
2265 ThisMBB->addSuccessor(RestoreMBB);
2266
2267 // MainMBB:
2268 BuildMI(MainMBB, DL, TII->get(VE::LEAzii), MainDestReg)
2269 .addImm(0)
2270 .addImm(0)
2271 .addImm(0);
2272 MainMBB->addSuccessor(SinkMBB);
2273
2274 // SinkMBB:
2275 BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(VE::PHI), DstReg)
2276 .addReg(MainDestReg)
2277 .addMBB(MainMBB)
2278 .addReg(RestoreDestReg)
2279 .addMBB(RestoreMBB);
2280
2281 // RestoreMBB:
2282 // Restore BP from buf[3] iff this function is using BP. The address of
2283 // buf is in SX10.
2284 // FIXME: Better to not use SX10 here
2285 if (TFI->hasBP(*MF)) {
2286 MachineInstrBuilder MIB =
2287 BuildMI(RestoreMBB, DL, TII->get(VE::LDrii), VE::SX17);
2288 MIB.addReg(VE::SX10);
2289 MIB.addImm(0);
2290 MIB.addImm(24);
2291 MIB.setMemRefs(MMOs);
2292 }
2293 BuildMI(RestoreMBB, DL, TII->get(VE::LEAzii), RestoreDestReg)
2294 .addImm(0)
2295 .addImm(0)
2296 .addImm(1);
2297 BuildMI(RestoreMBB, DL, TII->get(VE::BRCFLa_t)).addMBB(SinkMBB);
2298 RestoreMBB->addSuccessor(SinkMBB);
2299
2300 MI.eraseFromParent();
2301 return SinkMBB;
2302}
2303
2304 MachineBasicBlock *
2305 VETargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
2306 MachineBasicBlock *MBB) const {
2307 DebugLoc DL = MI.getDebugLoc();
2308 MachineFunction *MF = MBB->getParent();
2309 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2310 MachineRegisterInfo &MRI = MF->getRegInfo();
2311 
2312 // Memory Reference.
2313 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands());
2314 Register BufReg = MI.getOperand(0).getReg();
2315
2316 Register Tmp = MRI.createVirtualRegister(&VE::I64RegClass);
2317 // Since FP is only updated here but NOT referenced, it's treated as GPR.
2318 Register FP = VE::SX9;
2319 Register SP = VE::SX11;
2320
2321 MachineInstrBuilder MIB;
2322 
2323 MachineBasicBlock *ThisMBB = MBB;
2324
2325 // For `call @llvm.eh.sjlj.longjmp(buf)`, we generate the following instructions.
2326 //
2327 // ThisMBB:
2328 // %fp = load buf[0]
2329 // %jmp = load buf[1]
2330 // %s10 = buf ; Store an address of buf to SX10 for RestoreMBB
2331 // %sp = load buf[2] ; generated by llvm.eh.sjlj.setjmp.
2332 // jmp %jmp
2333
2334 // Reload FP.
2335 MIB = BuildMI(*ThisMBB, MI, DL, TII->get(VE::LDrii), FP);
2336 MIB.addReg(BufReg);
2337 MIB.addImm(0);
2338 MIB.addImm(0);
2339 MIB.setMemRefs(MMOs);
2340
2341 // Reload IP.
2342 MIB = BuildMI(*ThisMBB, MI, DL, TII->get(VE::LDrii), Tmp);
2343 MIB.addReg(BufReg);
2344 MIB.addImm(0);
2345 MIB.addImm(8);
2346 MIB.setMemRefs(MMOs);
2347
2348 // Copy BufReg to SX10 for later use in setjmp.
2349 // FIXME: Better to not use SX10 here
2350 BuildMI(*ThisMBB, MI, DL, TII->get(VE::ORri), VE::SX10)
2351 .addReg(BufReg)
2352 .addImm(0);
2353
2354 // Reload SP.
2355 MIB = BuildMI(*ThisMBB, MI, DL, TII->get(VE::LDrii), SP);
2356 MIB.add(MI.getOperand(0)); // we can preserve the kill flags here.
2357 MIB.addImm(0);
2358 MIB.addImm(16);
2359 MIB.setMemRefs(MMOs);
2360
2361 // Jump.
2362 BuildMI(*ThisMBB, MI, DL, TII->get(VE::BCFLari_t))
2363 .addReg(Tmp, getKillRegState(true))
2364 .addImm(0);
2365
2366 MI.eraseFromParent();
2367 return ThisMBB;
2368}
2369
2370 MachineBasicBlock *
2371 VETargetLowering::emitSjLjDispatchBlock(MachineInstr &MI,
2372 MachineBasicBlock *BB) const {
2373 DebugLoc DL = MI.getDebugLoc();
2374 MachineFunction *MF = BB->getParent();
2375 MachineFrameInfo &MFI = MF->getFrameInfo();
2376 MachineRegisterInfo &MRI = MF->getRegInfo();
2377 const VEInstrInfo *TII = Subtarget->getInstrInfo();
2378 int FI = MFI.getFunctionContextIndex();
2379
2380 // Get a mapping of the call site numbers to all of the landing pads they're
2381 // associated with.
2382 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
2383 unsigned MaxCSNum = 0;
2384 for (auto &MBB : *MF) {
2385 if (!MBB.isEHPad())
2386 continue;
2387
2388 MCSymbol *Sym = nullptr;
2389 for (const auto &MI : MBB) {
2390 if (MI.isDebugInstr())
2391 continue;
2392
2393 assert(MI.isEHLabel() && "expected EH_LABEL");
2394 Sym = MI.getOperand(0).getMCSymbol();
2395 break;
2396 }
2397
2398 if (!MF->hasCallSiteLandingPad(Sym))
2399 continue;
2400
2401 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
2402 CallSiteNumToLPad[CSI].push_back(&MBB);
2403 MaxCSNum = std::max(MaxCSNum, CSI);
2404 }
2405 }
2406
2407 // Get an ordered list of the machine basic blocks for the jump table.
2408 std::vector<MachineBasicBlock *> LPadList;
2409 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
2410 LPadList.reserve(CallSiteNumToLPad.size());
2411
2412 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
2413 for (auto &LP : CallSiteNumToLPad[CSI]) {
2414 LPadList.push_back(LP);
2415 InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
2416 }
2417 }
2418
2419 assert(!LPadList.empty() &&
2420 "No landing pad destinations for the dispatch jump table!");
2421
2422 // The %fn_context is allocated like below (from --print-after=sjljehprepare):
2423 // %fn_context = alloca { i8*, i64, [4 x i64], i8*, i8*, [5 x i8*] }
2424 //
2425 // The `[5 x i8*]` is the jmpbuf, so jmpbuf[1] is at FI+72.
2426 // The first `i64` is the call site, so it is at FI+8.
2427 static const int OffsetIC = 72;
2428 static const int OffsetCS = 8;
2429
2430 // Create the MBBs for the dispatch code as follows:
2431 //
2432 // ThisMBB:
2433 // Prepare DispatchBB address and store it to buf[1].
2434 // ...
2435 //
2436 // DispatchBB:
2437 // %s15 = GETGOT iff isPositionIndependent
2438 // %callsite = load callsite
2439 // brgt.l.t #size of callsites, %callsite, DispContBB
2440 //
2441 // TrapBB:
2442 // Call abort.
2443 //
2444 // DispContBB:
2445 // %breg = address of jump table
2446 // %pc = load and calculate next pc from %breg and %callsite
2447 // jmp %pc
2448
2449 // Shove the dispatch's address into the return slot in the function context.
2450 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
2451 DispatchBB->setIsEHPad(true);
2452
2453 // TrapBB causes a trap, like `assert(0)`.
2454 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
2455 DispatchBB->addSuccessor(TrapBB);
2456
2457 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
2458 DispatchBB->addSuccessor(DispContBB);
2459
2460 // Insert MBBs.
2461 MF->push_back(DispatchBB);
2462 MF->push_back(DispContBB);
2463 MF->push_back(TrapBB);
2464
2465 // Insert code to call abort in the TrapBB.
2466 Register Abort = prepareSymbol(*TrapBB, TrapBB->end(), "abort", DL,
2467 /* Local */ false, /* Call */ true);
2468 BuildMI(TrapBB, DL, TII->get(VE::BSICrii), VE::SX10)
2469 .addReg(Abort, getKillRegState(true))
2470 .addImm(0)
2471 .addImm(0);
2472
2473 // Insert code into the entry block that creates and registers the function
2474 // context.
2475 setupEntryBlockForSjLj(MI, BB, DispatchBB, FI, OffsetIC);
2476
2477 // Create the jump table and associated information.
2478 unsigned JTE = getJumpTableEncoding();
2479 MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
2480 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
2481
2482 const VERegisterInfo &RI = TII->getRegisterInfo();
2483 // Add a register mask with no preserved registers. This results in all
2484 // registers being marked as clobbered.
2485 BuildMI(DispatchBB, DL, TII->get(VE::NOP))
2486 .addRegMask(RI.getNoPreservedMask());
2487 
2488 if (isPositionIndependent()) {
2489 // Force generation of GETGOT, since the current implementation doesn't
2490 // store the GOT register.
2491 BuildMI(DispatchBB, DL, TII->get(VE::GETGOT), VE::SX15);
2492 }
2493
2494 // IReg is used as an index in a memory operand and therefore can't be SP.
2495 const TargetRegisterClass *RC = &VE::I64RegClass;
2496 Register IReg = MRI.createVirtualRegister(RC);
2497 addFrameReference(BuildMI(DispatchBB, DL, TII->get(VE::LDLZXrii), IReg), FI,
2498 OffsetCS);
2499 if (LPadList.size() < 64) {
2500 BuildMI(DispatchBB, DL, TII->get(VE::BRCFLir_t))
2501 .addImm(VECC::CC_ILE)
2502 .addImm(LPadList.size())
2503 .addReg(IReg)
2504 .addMBB(TrapBB);
2505 } else {
2506 assert(LPadList.size() <= 0x7FFFFFFF && "Too large Landing Pad!");
2507 Register TmpReg = MRI.createVirtualRegister(RC);
2508 BuildMI(DispatchBB, DL, TII->get(VE::LEAzii), TmpReg)
2509 .addImm(0)
2510 .addImm(0)
2511 .addImm(LPadList.size());
2512 BuildMI(DispatchBB, DL, TII->get(VE::BRCFLrr_t))
2513 .addImm(VECC::CC_ILE)
2514 .addReg(TmpReg, getKillRegState(true))
2515 .addReg(IReg)
2516 .addMBB(TrapBB);
2517 }
2518
2519 Register BReg = MRI.createVirtualRegister(RC);
2520 Register Tmp1 = MRI.createVirtualRegister(RC);
2521 Register Tmp2 = MRI.createVirtualRegister(RC);
2522
2523 if (isPositionIndependent()) {
2524 // Create the following instructions for local linkage PIC code.
2525 // lea %Tmp1, .LJTI0_0@gotoff_lo
2526 // and %Tmp2, %Tmp1, (32)0
2527 // lea.sl %BReg, .LJTI0_0@gotoff_hi(%Tmp2, %s15) ; %s15 is GOT
2528 BuildMI(DispContBB, DL, TII->get(VE::LEAzii), Tmp1)
2529 .addImm(0)
2530 .addImm(0)
2531 .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_GOTOFF_LO32);
2532 BuildMI(DispContBB, DL, TII->get(VE::ANDrm), Tmp2)
2533 .addReg(Tmp1, getKillRegState(true))
2534 .addImm(M0(32));
2535 BuildMI(DispContBB, DL, TII->get(VE::LEASLrri), BReg)
2536 .addReg(VE::SX15)
2537 .addReg(Tmp2, getKillRegState(true))
2538 .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_GOTOFF_HI32);
2539 } else {
2540 // Create the following instructions for non-PIC code.
2541 // lea %Tmp1, .LJTI0_0@lo
2542 // and %Tmp2, %Tmp1, (32)0
2543 // lea.sl %BReg, .LJTI0_0@hi(%Tmp2)
2544 BuildMI(DispContBB, DL, TII->get(VE::LEAzii), Tmp1)
2545 .addImm(0)
2546 .addImm(0)
2547 .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_LO32);
2548 BuildMI(DispContBB, DL, TII->get(VE::ANDrm), Tmp2)
2549 .addReg(Tmp1, getKillRegState(true))
2550 .addImm(M0(32));
2551 BuildMI(DispContBB, DL, TII->get(VE::LEASLrii), BReg)
2552 .addReg(Tmp2, getKillRegState(true))
2553 .addImm(0)
2554 .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_HI32);
2555 }
2556
2557 switch (JTE) {
2558 case MachineJumpTableInfo::EK_BlockAddress: {
2559 // Generate simple block address code for the non-PIC model.
2560 // sll %Tmp1, %IReg, 3
2561 // lds %TReg, 0(%Tmp1, %BReg)
2562 // bcfla %TReg
2563
2564 Register TReg = MRI.createVirtualRegister(RC);
2565 Register Tmp1 = MRI.createVirtualRegister(RC);
2566
2567 BuildMI(DispContBB, DL, TII->get(VE::SLLri), Tmp1)
2568 .addReg(IReg, getKillRegState(true))
2569 .addImm(3);
2570 BuildMI(DispContBB, DL, TII->get(VE::LDrri), TReg)
2571 .addReg(BReg, getKillRegState(true))
2572 .addReg(Tmp1, getKillRegState(true))
2573 .addImm(0);
2574 BuildMI(DispContBB, DL, TII->get(VE::BCFLari_t))
2575 .addReg(TReg, getKillRegState(true))
2576 .addImm(0);
2577 break;
2578 }
2579 case MachineJumpTableInfo::EK_Custom32: {
2580 // Generate block address code using differences from the function pointer
2581 // for PIC model.
2582 // sll %Tmp1, %IReg, 2
2583 // ldl.zx %OReg, 0(%Tmp1, %BReg)
2584 // Prepare function address in BReg2.
2585 // adds.l %TReg, %BReg2, %OReg
2586 // bcfla %TReg
2587
2588 assert(isPositionIndependent());
2589 Register OReg = MRI.createVirtualRegister(RC);
2590 Register TReg = MRI.createVirtualRegister(RC);
2591 Register Tmp1 = MRI.createVirtualRegister(RC);
2592
2593 BuildMI(DispContBB, DL, TII->get(VE::SLLri), Tmp1)
2594 .addReg(IReg, getKillRegState(true))
2595 .addImm(2);
2596 BuildMI(DispContBB, DL, TII->get(VE::LDLZXrri), OReg)
2597 .addReg(BReg, getKillRegState(true))
2598 .addReg(Tmp1, getKillRegState(true))
2599 .addImm(0);
2600 Register BReg2 =
2601 prepareSymbol(*DispContBB, DispContBB->end(),
2602 DispContBB->getParent()->getName(), DL, /* Local */ true);
2603 BuildMI(DispContBB, DL, TII->get(VE::ADDSLrr), TReg)
2604 .addReg(OReg, getKillRegState(true))
2605 .addReg(BReg2, getKillRegState(true));
2606 BuildMI(DispContBB, DL, TII->get(VE::BCFLari_t))
2607 .addReg(TReg, getKillRegState(true))
2608 .addImm(0);
2609 break;
2610 }
2611 default:
2612 llvm_unreachable("Unexpected jump table encoding");
2613 }
2614
2615 // Add the jump table entries as successors to the MBB.
2616 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
2617 for (auto &LP : LPadList)
2618 if (SeenMBBs.insert(LP).second)
2619 DispContBB->addSuccessor(LP);
2620
2621 // N.B. the order the invoke BBs are processed in doesn't matter here.
2622 SmallVector<MachineBasicBlock *, 64> MBBLPads;
2623 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
2624 for (MachineBasicBlock *MBB : InvokeBBs) {
2625 // Remove the landing pad successor from the invoke block and replace it
2626 // with the new dispatch block.
2627 // Keep a copy of Successors since it's modified inside the loop.
2628 SmallVector<MachineBasicBlock *, 4> Successors(MBB->succ_rbegin(),
2629 MBB->succ_rend());
2630 // FIXME: Avoid quadratic complexity.
2631 for (auto *MBBS : Successors) {
2632 if (MBBS->isEHPad()) {
2633 MBB->removeSuccessor(MBBS);
2634 MBBLPads.push_back(MBBS);
2635 }
2636 }
2637
2638 MBB->addSuccessor(DispatchBB);
2639
2640 // Find the invoke call and mark all of the callee-saved registers as
2641 // 'implicit defined' so that they're spilled. This prevents code from
2642 // moving instructions to before the EH block, where they will never be
2643 // executed.
2644 for (auto &II : reverse(*MBB)) {
2645 if (!II.isCall())
2646 continue;
2647
2648 DenseMap<Register, bool> DefRegs;
2649 for (auto &MOp : II.operands())
2650 if (MOp.isReg())
2651 DefRegs[MOp.getReg()] = true;
2652
2653 MachineInstrBuilder MIB(*MF, &II);
2654 for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
2655 Register Reg = SavedRegs[RI];
2656 if (!DefRegs[Reg])
2657 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
2658 }
2659
2660 break;
2661 }
2662 }
2663
2664 // Mark all former landing pads as non-landing pads. The dispatch is the only
2665 // landing pad now.
2666 for (auto &LP : MBBLPads)
2667 LP->setIsEHPad(false);
2668
2669 // The instruction is gone now.
2670 MI.eraseFromParent();
2671 return BB;
2672}
2673
2674 MachineBasicBlock *
2675 VETargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
2676 MachineBasicBlock *BB) const {
2677 switch (MI.getOpcode()) {
2678 default:
2679 llvm_unreachable("Unknown Custom Instruction!");
2680 case VE::EH_SjLj_LongJmp:
2681 return emitEHSjLjLongJmp(MI, BB);
2682 case VE::EH_SjLj_SetJmp:
2683 return emitEHSjLjSetJmp(MI, BB);
2684 case VE::EH_SjLj_Setup_Dispatch:
2685 return emitSjLjDispatchBlock(MI, BB);
2686 }
2687}
2688
2689static bool isSimm7(SDValue V) {
2690 EVT VT = V.getValueType();
2691 if (VT.isVector())
2692 return false;
2693
2694 if (VT.isInteger()) {
2695 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(V))
2696 return isInt<7>(C->getSExtValue());
2697 } else if (VT.isFloatingPoint()) {
2698 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(V)) {
2699 if (VT == MVT::f32 || VT == MVT::f64) {
2700 const APInt &Imm = C->getValueAPF().bitcastToAPInt();
2701 uint64_t Val = Imm.getSExtValue();
2702 if (Imm.getBitWidth() == 32)
2703 Val <<= 32; // A float immediate is placed in the upper bits on VE.
2704 return isInt<7>(Val);
2705 }
2706 }
2707 }
2708 return false;
2709}
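// [Editor's note] Examples for isSimm7() above:
//   i64 -64 .. i64 63 -> true (fits a signed 7-bit immediate); i64 64 -> false
//   f32: the 32-bit pattern is shifted into the upper half first, so any
//   nonzero f32 becomes a multiple of 2^32 and only the bit pattern 0
//   (i.e. +0.0) still fits in 7 signed bits.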
2710
2711static bool isMImm(SDValue V) {
2712 EVT VT = V.getValueType();
2713 if (VT.isVector())
2714 return false;
2715
2716 if (VT.isInteger()) {
2717 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(V))
2718 return isMImmVal(getImmVal(C));
2719 } else if (VT.isFloatingPoint()) {
2720 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(V)) {
2721 if (VT == MVT::f32) {
2722 // A float value is placed in the upper bits, so ignore the lower 32 bits.
2723 return isMImm32Val(getFpImmVal(C) >> 32);
2724 } else if (VT == MVT::f64) {
2725 return isMImmVal(getFpImmVal(C));
2726 }
2727 }
2728 }
2729 return false;
2730}
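// [Editor's note, inferred from VE assembly conventions] An "MImm" is VE's
// (M)0/(M)1 immediate form: (M)1 is M ones followed by 64-M zeros, and (M)0
// is M zeros followed by 64-M ones. For example, the (32)0 used with ANDrm
// throughout this file is 0x00000000FFFFFFFF, which clears the upper 32
// bits. Since an f32 occupies the upper 32 bits of a register, isMImm()
// tests `getFpImmVal(C) >> 32` against the 32-bit variant.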
2731
2732static unsigned decideComp(EVT SrcVT, ISD::CondCode CC) {
2733 if (SrcVT.isFloatingPoint()) {
2734 if (SrcVT == MVT::f128)
2735 return VEISD::CMPQ;
2736 return VEISD::CMPF;
2737 }
2738 return isSignedIntSetCC(CC) ? VEISD::CMPI : VEISD::CMPU;
2739}
2740
2741static EVT decideCompType(EVT SrcVT) {
2742 if (SrcVT == MVT::f128)
2743 return MVT::f64;
2744 return SrcVT;
2745}
2746
2747 static bool safeWithoutCompWithNull(EVT SrcVT, ISD::CondCode CC,
2748 bool WithCMov) {
2749 if (SrcVT.isFloatingPoint()) {
2750 // For a floating point setcc, only an unordered comparison (or a general
2751 // comparison with the -enable-no-nans-fp-math option) reaches here, so it
2752 // is safe even if the values are NaN. Only f128 is unsafe, since VE uses
2753 // the f64 result of an f128 comparison.
2754 return SrcVT != MVT::f128;
2755 }
2756 if (isIntEqualitySetCC(CC)) {
2757 // Equality and inequality comparisons are safe without a comparison with 0.
2758 return true;
2759 }
2760 if (WithCMov) {
2761 // For an integer setcc feeding a cmov, all signed comparisons with 0
2762 // are safe.
2763 return isSignedIntSetCC(CC);
2764 }
2765 // Otherwise, only a signed 64-bit comparison is safe. For unsigned,
2766 // "CMPU 0x80000000, 0" has to be greater than 0, but it becomes less
2767 // than 0 without CMPU. For 32-bit values, the other half of the register
2768 // is undefined, so it is likewise unsafe without CMPI.
2769 return isSignedIntSetCC(CC) && SrcVT == MVT::i64;
2770}
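// [Editor's note] Why the unsigned case needs an explicit CMPU: for
// `setugt X, 0` with X = 0x8000000000000000, X is greater than 0 as an
// unsigned value, yet reading X's own sign bit would classify it as
// "less than 0". CMPU first materializes a properly ordered comparison
// result for the following CMOV/branch to consume.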
2771
2772 static SDValue generateComparison(EVT VT, SDValue LHS, SDValue RHS,
2773 ISD::CondCode CC, bool WithCMov,
2774 const SDLoc &DL, SelectionDAG &DAG) {
2775 // Compare values. If RHS is 0 and it is safe to compute the result
2776 // without a comparison, we don't generate a compare instruction.
2777 EVT CompVT = decideCompType(VT);
2778 if (CompVT == VT && safeWithoutCompWithNull(VT, CC, WithCMov) &&
2779 isNullConstant(RHS)) {
2780 return LHS;
2781 }
2782 return DAG.getNode(decideComp(VT, CC), DL, CompVT, LHS, RHS);
2783}
2784
2785 SDValue VETargetLowering::combineSelect(SDNode *N,
2786 DAGCombinerInfo &DCI) const {
2787 assert(N->getOpcode() == ISD::SELECT &&
2788 "Should be called with a SELECT node");
2789 ISD::CondCode CC = ISD::CondCode::SETNE;
2790 SDValue Cond = N->getOperand(0);
2791 SDValue True = N->getOperand(1);
2792 SDValue False = N->getOperand(2);
2793
2794 // We handle only scalar SELECT.
2795 EVT VT = N->getValueType(0);
2796 if (VT.isVector())
2797 return SDValue();
2798
2799 // Perform combineSelect after the DAG is legalized.
2800 if (!DCI.isAfterLegalizeDAG())
2801 return SDValue();
2802
2803 EVT VT0 = Cond.getValueType();
2804 if (isMImm(True)) {
2805 // VE's conditional move can handle an MImm in the True clause, so nothing to do.
2806 } else if (isMImm(False)) {
2807 // VE's conditional move can handle an MImm in the True clause, so if False
2808 // holds an MImm value, swap the True and False clauses and invert the condition code.
2809 std::swap(True, False);
2810 CC = getSetCCInverse(CC, VT0);
2811 }
2812
2813 SDLoc DL(N);
2814 SelectionDAG &DAG = DCI.DAG;
2815 VECC::CondCode VECCVal;
2816 if (VT0.isFloatingPoint()) {
2817 VECCVal = fpCondCode2Fcc(CC);
2818 } else {
2819 VECCVal = intCondCode2Icc(CC);
2820 }
2821 SDValue Ops[] = {Cond, True, False,
2822 DAG.getConstant(VECCVal, DL, MVT::i32)};
2823 return DAG.getNode(VEISD::CMOV, DL, VT, Ops);
2824}
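// [Editor's note] The rewrite performed above, in DAG terms:
//   (select Cond, True, False)
//     -> (VEISD::CMOV Cond, True, False, cc)
// where cc starts as "Cond != 0" and is inverted when True/False are
// swapped so that an MImm ends up in the True slot.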
2825
2826 SDValue VETargetLowering::combineSelectCC(SDNode *N,
2827 DAGCombinerInfo &DCI) const {
2828 assert(N->getOpcode() == ISD::SELECT_CC &&
2829 "Should be called with a SELECT_CC node");
2830 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
2831 SDValue LHS = N->getOperand(0);
2832 SDValue RHS = N->getOperand(1);
2833 SDValue True = N->getOperand(2);
2834 SDValue False = N->getOperand(3);
2835
2836 // We handle only scalar SELECT_CC.
2837 EVT VT = N->getValueType(0);
2838 if (VT.isVector())
2839 return SDValue();
2840
2841 // Perform combineSelectCC after the DAG is legalized.
2842 if (!DCI.isAfterLegalizeDAG())
2843 return SDValue();
2844
2845 // We handle only i32/i64/f32/f64/f128 comparisons.
2846 EVT LHSVT = LHS.getValueType();
2847 assert(LHSVT == RHS.getValueType());
2848 switch (LHSVT.getSimpleVT().SimpleTy) {
2849 case MVT::i32:
2850 case MVT::i64:
2851 case MVT::f32:
2852 case MVT::f64:
2853 case MVT::f128:
2854 break;
2855 default:
2856 // Return SDValue to let llvm handle other types.
2857 return SDValue();
2858 }
2859
2860 if (isMImm(RHS)) {
2861 // VE's comparison can handle MImm in RHS, so nothing to do.
2862 } else if (isSimm7(RHS)) {
2863 // VE's comparison can handle a Simm7 in the LHS, so swap LHS and RHS,
2864 // and update the condition code.
2865 std::swap(LHS, RHS);
2866 CC = getSetCCSwappedOperands(CC);
2867 }
2868 if (isMImm(True)) {
2869 // VE's conditional move can handle an MImm in the True clause, so nothing to do.
2870 } else if (isMImm(False)) {
2871 // VE's conditional move can handle an MImm in the True clause, so if False
2872 // holds an MImm value, swap the True and False clauses and invert the condition code.
2873 std::swap(True, False);
2874 CC = getSetCCInverse(CC, LHSVT);
2875 }
2876
2877 SDLoc DL(N);
2878 SelectionDAG &DAG = DCI.DAG;
2879
2880 bool WithCMov = true;
2881 SDValue CompNode = generateComparison(LHSVT, LHS, RHS, CC, WithCMov, DL, DAG);
2882
2883 VECC::CondCode VECCVal;
2884 if (LHSVT.isFloatingPoint()) {
2885 VECCVal = fpCondCode2Fcc(CC);
2886 } else {
2887 VECCVal = intCondCode2Icc(CC);
2888 }
2889 SDValue Ops[] = {CompNode, True, False,
2890 DAG.getConstant(VECCVal, DL, MVT::i32)};
2891 return DAG.getNode(VEISD::CMOV, DL, VT, Ops);
2892}
2893
2894static bool isI32InsnAllUses(const SDNode *User, const SDNode *N);
2895static bool isI32Insn(const SDNode *User, const SDNode *N) {
2896 switch (User->getOpcode()) {
2897 default:
2898 return false;
2899 case ISD::ADD:
2900 case ISD::SUB:
2901 case ISD::MUL:
2902 case ISD::SDIV:
2903 case ISD::UDIV:
2904 case ISD::SETCC:
2905 case ISD::SMIN:
2906 case ISD::SMAX:
2907 case ISD::SHL:
2908 case ISD::SRA:
2909 case ISD::BSWAP:
2910 case ISD::SINT_TO_FP:
2911 case ISD::UINT_TO_FP:
2912 case ISD::BR_CC:
2913 case ISD::BITCAST:
2914 case ISD::ATOMIC_CMP_SWAP:
2915 case ISD::ATOMIC_SWAP:
2916 case VEISD::CMPU:
2917 case VEISD::CMPI:
2918 return true;
2919 case ISD::SRL:
2920 if (N->getOperand(0).getOpcode() != ISD::SRL)
2921 return true;
2922 // (srl (trunc (srl ...))) may be optimized by combining the srls, so
2923 // don't optimize the trunc now.
2924 return false;
2925 case ISD::SELECT_CC:
2926 if (User->getOperand(2).getNode() != N &&
2927 User->getOperand(3).getNode() != N)
2928 return true;
2929 return isI32InsnAllUses(User, N);
2930 case VEISD::CMOV:
2931 // CMOV in (cmov (trunc ...), true, false, int-comparison) is safe.
2932 // However, trunc in true or false clauses is not safe.
2933 if (User->getOperand(1).getNode() != N &&
2934 User->getOperand(2).getNode() != N &&
2935 isa<ConstantSDNode>(User->getOperand(3))) {
2936 VECC::CondCode VECCVal =
2937 static_cast<VECC::CondCode>(User->getConstantOperandVal(3));
2938 return isIntVECondCode(VECCVal);
2939 }
2940 [[fallthrough]];
2941 case ISD::AND:
2942 case ISD::OR:
2943 case ISD::XOR:
2944 case ISD::SELECT:
2945 case ISD::CopyToReg:
2946 // Check all use of selections, bit operations, and copies. If all of them
2947 // are safe, optimize truncate to extract_subreg.
2948 return isI32InsnAllUses(User, N);
2949 }
2950}
2951
2952static bool isI32InsnAllUses(const SDNode *User, const SDNode *N) {
2953 // Check all use of User node. If all of them are safe, optimize
2954 // truncate to extract_subreg.
2955 for (const SDNode *U : User->uses()) {
2956 switch (U->getOpcode()) {
2957 default:
2958 // If the use is an instruction which treats the source operand as i32,
2959 // it is safe to avoid truncate here.
2960 if (isI32Insn(U, N))
2961 continue;
2962 break;
2963 case ISD::ANY_EXTEND:
2964 case ISD::SIGN_EXTEND:
2965 case ISD::ZERO_EXTEND: {
2966 // Special optimizations to the combination of ext and trunc.
2967 // (ext ... (select ... (trunc ...))) is safe to avoid truncate here
2968 // since this truncate instruction clears higher 32 bits which is filled
2969 // by one of ext instructions later.
2970 assert(N->getValueType(0) == MVT::i32 &&
2971 "find truncate to not i32 integer");
2972 if (User->getOpcode() == ISD::SELECT_CC ||
2973 User->getOpcode() == ISD::SELECT || User->getOpcode() == VEISD::CMOV)
2974 continue;
2975 break;
2976 }
2977 }
2978 return false;
2979 }
2980 return true;
2981}
2982
2983 // Optimize TRUNCATE in DAG combining. Optimizing it in custom lowering is
2984 // sometimes too early, and optimizing it in DAG pattern matching in
2985 // VEInstrInfo.td is sometimes too late. So, we do it here.
2986 SDValue VETargetLowering::combineTRUNCATE(SDNode *N,
2987 DAGCombinerInfo &DCI) const {
2988 assert(N->getOpcode() == ISD::TRUNCATE &&
2989 "Should be called with a TRUNCATE node");
2990
2991 SelectionDAG &DAG = DCI.DAG;
2992 SDLoc DL(N);
2993 EVT VT = N->getValueType(0);
2994
2995 // We prefer to do this when all types are legal.
2996 if (!DCI.isAfterLegalizeDAG())
2997 return SDValue();
2998
2999 // For now, skip combining TRUNCATE if its operand might fold to a constant.
3000 if (N->getOperand(0)->getOpcode() == ISD::SELECT_CC &&
3001 isa<ConstantSDNode>(N->getOperand(0)->getOperand(0)) &&
3002 isa<ConstantSDNode>(N->getOperand(0)->getOperand(1)))
3003 return SDValue();
3004
3005 // Check all use of this TRUNCATE.
3006 for (const SDNode *User : N->uses()) {
3007 // Make sure that we're not going to replace TRUNCATE for non i32
3008 // instructions.
3009 //
3010 // FIXME: Although we could sometimes handle this, and it does occur in
3011 // practice that one of the condition inputs to the select is also one of
3012 // the outputs, we currently can't deal with this.
3013 if (isI32Insn(User, N))
3014 continue;
3015
3016 return SDValue();
3017 }
3018
3019 SDValue SubI32 = DAG.getTargetConstant(VE::sub_i32, DL, MVT::i32);
3020 return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, VT,
3021 N->getOperand(0), SubI32),
3022 0);
3023}
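// [Editor's note] The resulting transform, when every user is i32-safe:
//   (i32 (truncate i64:X)) -> (i32 (EXTRACT_SUBREG X, sub_i32))
// i.e. the truncate becomes a free subregister read, since all users are
// known to look only at the low 32 bits.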
3024
3025 SDValue VETargetLowering::PerformDAGCombine(SDNode *N,
3026 DAGCombinerInfo &DCI) const {
3027 switch (N->getOpcode()) {
3028 default:
3029 break;
3030 case ISD::SELECT:
3031 return combineSelect(N, DCI);
3032 case ISD::SELECT_CC:
3033 return combineSelectCC(N, DCI);
3034 case ISD::TRUNCATE:
3035 return combineTRUNCATE(N, DCI);
3036 }
3037
3038 return SDValue();
3039}
3040
3041//===----------------------------------------------------------------------===//
3042// VE Inline Assembly Support
3043//===----------------------------------------------------------------------===//
3044
3045 VETargetLowering::ConstraintType
3046 VETargetLowering::getConstraintType(StringRef Constraint) const {
3047 if (Constraint.size() == 1) {
3048 switch (Constraint[0]) {
3049 default:
3050 break;
3051 case 'v': // vector registers
3052 return C_RegisterClass;
3053 }
3054 }
3055 return TargetLowering::getConstraintType(Constraint);
3056}
3057
3058std::pair<unsigned, const TargetRegisterClass *>
3059 VETargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
3060 StringRef Constraint,
3061 MVT VT) const {
3062 const TargetRegisterClass *RC = nullptr;
3063 if (Constraint.size() == 1) {
3064 switch (Constraint[0]) {
3065 default:
3066 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3067 case 'r':
3068 RC = &VE::I64RegClass;
3069 break;
3070 case 'v':
3071 RC = &VE::V64RegClass;
3072 break;
3073 }
3074 return std::make_pair(0U, RC);
3075 }
3076
3077 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3078}
3079
3080//===----------------------------------------------------------------------===//
3081// VE Target Optimization Support
3082//===----------------------------------------------------------------------===//
3083
3084 unsigned VETargetLowering::getMinimumJumpTableEntries() const {
3085 // Specify 8 for the PIC model to relieve the impact of PIC load instructions.
3086 if (isJumpTableRelative())
3087 return 8;
3088
3089 return TargetLowering::getMinimumJumpTableEntries();
3090 }
3091
3092 bool VETargetLowering::hasAndNot(SDValue Y) const {
3093 EVT VT = Y.getValueType();
3094
3095 // VE doesn't have a vector and-not instruction.
3096 if (VT.isVector())
3097 return false;
3098
3099 // VE allows different immediate values for X and Y where ~X & Y.
3100 // Only simm7 works for X, and only mimm works for Y on VE. However, this
3101 // function is used to check whether an immediate value is OK for and-not
3102 // instruction as both X and Y. Generating additional instruction to
3103 // retrieve an immediate value is no good since the purpose of this
3104 // function is to convert a series of 3 instructions to another series of
3105 // 3 instructions with better parallelism. Therefore, we return false
3106 // for all immediate values now.
3107 // FIXME: Change hasAndNot function to have two operands to make it work
3108 // correctly with Aurora VE.
3109 if (isa<ConstantSDNode>(Y))
3110 return false;
3111
3112 // It's ok for generic registers.
3113 return true;
3114}
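// [Editor's note] hasAndNot(Y) answers whether (and (xor X, -1), Y) should
// be kept for an and-not pattern. For example:
//   ~X & %reg  -> true, a single and-not style operation is expected
//   ~X & 1234  -> false, since the constant may fit neither of the two
//                 immediate forms (simm7 for X, mimm for Y) and
//                 materializing it would cost the very instruction this
//                 transform tries to save.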
3115
3116 SDValue VETargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3117 SelectionDAG &DAG) const {
3118 assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!");
3119 MVT VT = Op.getOperand(0).getSimpleValueType();
3120
3121 // Special treatment for packed V64 types.
3122 assert(VT == MVT::v512i32 || VT == MVT::v512f32);
3123 (void)VT;
3124 // Example of codes:
3125 // %packed_v = extractelt %vr, %idx / 2
3126 // %v = %packed_v >> (%idx % 2 * 32)
3127 // %res = %v & 0xffffffff
3128
3129 SDValue Vec = Op.getOperand(0);
3130 SDValue Idx = Op.getOperand(1);
3131 SDLoc DL(Op);
3132 SDValue Result = Op;
3133 if (false /* Idx->isConstant() */) {
3134 // TODO: optimized implementation using constant values
3135 } else {
3136 SDValue Const1 = DAG.getConstant(1, DL, MVT::i64);
3137 SDValue HalfIdx = DAG.getNode(ISD::SRL, DL, MVT::i64, {Idx, Const1});
3138 SDValue PackedElt =
3139 SDValue(DAG.getMachineNode(VE::LVSvr, DL, MVT::i64, {Vec, HalfIdx}), 0);
3140 SDValue AndIdx = DAG.getNode(ISD::AND, DL, MVT::i64, {Idx, Const1});
3141 SDValue Shift = DAG.getNode(ISD::XOR, DL, MVT::i64, {AndIdx, Const1});
3142 SDValue Const5 = DAG.getConstant(5, DL, MVT::i64);
3143 Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, {Shift, Const5});
3144 PackedElt = DAG.getNode(ISD::SRL, DL, MVT::i64, {PackedElt, Shift});
3145 SDValue Mask = DAG.getConstant(0xFFFFFFFFL, DL, MVT::i64);
3146 PackedElt = DAG.getNode(ISD::AND, DL, MVT::i64, {PackedElt, Mask});
3147 SDValue SubI32 = DAG.getTargetConstant(VE::sub_i32, DL, MVT::i32);
3148 Result = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
3149 MVT::i32, PackedElt, SubI32),
3150 0);
3151
3152 if (Op.getSimpleValueType() == MVT::f32) {
3153 Result = DAG.getBitcast(MVT::f32, Result);
3154 } else {
3155 assert(Op.getSimpleValueType() == MVT::i32);
3156 }
3157 }
3158 return Result;
3159}
3160
3161 SDValue VETargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3162 SelectionDAG &DAG) const {
3163 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!");
3164 MVT VT = Op.getOperand(0).getSimpleValueType();
3165
3166 // Special treatment for packed V64 types.
3167 assert(VT == MVT::v512i32 || VT == MVT::v512f32);
3168 (void)VT;
3169 // Elements of v512i32 and v512f32 live in the upper bits (0..31). These
3170 // "upper bits" require `val << 32` from a C implementation's point of view.
3171 //
3172 // Example of codes:
3173 // %packed_elt = extractelt %vr, (%idx >> 1)
3174 // %shift = ((%idx & 1) ^ 1) << 5
3175 // %packed_elt &= 0xffffffff00000000 >> shift
3176 // %packed_elt |= (zext %val) << shift
3177 // %vr = insertelt %vr, %packed_elt, (%idx >> 1)
3178
3179 SDLoc DL(Op);
3180 SDValue Vec = Op.getOperand(0);
3181 SDValue Val = Op.getOperand(1);
3182 SDValue Idx = Op.getOperand(2);
3183 if (Idx.getSimpleValueType() == MVT::i32)
3184 Idx = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Idx);
3185 if (Val.getSimpleValueType() == MVT::f32)
3186 Val = DAG.getBitcast(MVT::i32, Val);
3187 assert(Val.getSimpleValueType() == MVT::i32);
3188 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
3189
3190 SDValue Result = Op;
3191 if (false /* Idx->isConstant()*/) {
3192 // TODO: optimized implementation using constant values
3193 } else {
3194 SDValue Const1 = DAG.getConstant(1, DL, MVT::i64);
3195 SDValue HalfIdx = DAG.getNode(ISD::SRL, DL, MVT::i64, {Idx, Const1});
3196 SDValue PackedElt =
3197 SDValue(DAG.getMachineNode(VE::LVSvr, DL, MVT::i64, {Vec, HalfIdx}), 0);
3198 SDValue AndIdx = DAG.getNode(ISD::AND, DL, MVT::i64, {Idx, Const1});
3199 SDValue Shift = DAG.getNode(ISD::XOR, DL, MVT::i64, {AndIdx, Const1});
3200 SDValue Const5 = DAG.getConstant(5, DL, MVT::i64);
3201 Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, {Shift, Const5});
3202 SDValue Mask = DAG.getConstant(0xFFFFFFFF00000000L, DL, MVT::i64);
3203 Mask = DAG.getNode(ISD::SRL, DL, MVT::i64, {Mask, Shift});
3204 PackedElt = DAG.getNode(ISD::AND, DL, MVT::i64, {PackedElt, Mask});
3205 Val = DAG.getNode(ISD::SHL, DL, MVT::i64, {Val, Shift});
3206 PackedElt = DAG.getNode(ISD::OR, DL, MVT::i64, {PackedElt, Val});
3207 Result =
3208 SDValue(DAG.getMachineNode(VE::LSVrr_v, DL, Vec.getSimpleValueType(),
3209 {HalfIdx, PackedElt, Vec}),
3210 0);
3211 }
3212 return Result;
3213}
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define RegName(no)
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
const char LLVMTargetMachineRef TM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static unsigned decideComp(EVT SrcVT, ISD::CondCode CC)
static bool isSimm7(SDValue V)
CCAssignFn * getParamCC(CallingConv::ID CallConv, bool IsVarArg)
static SDValue lowerLoadF128(SDValue Op, SelectionDAG &DAG)
static bool isMImm(SDValue V)
static SDValue prepareTS1AM(SDValue Op, SelectionDAG &DAG, SDValue &Flag, SDValue &Bits)
CCAssignFn * getReturnCC(CallingConv::ID CallConv)
static bool safeWithoutCompWithNull(EVT SrcVT, ISD::CondCode CC, bool WithCMov)
static bool isI32InsnAllUses(const SDNode *User, const SDNode *N)
static SDValue lowerLoadI1(SDValue Op, SelectionDAG &DAG)
static SDValue generateComparison(EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode CC, bool WithCMov, const SDLoc &DL, SelectionDAG &DAG)
static EVT decideCompType(EVT SrcVT)
static bool isI32Insn(const SDNode *User, const SDNode *N)
static SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const VETargetLowering &TLI, const VESubtarget *Subtarget)
static const MVT AllMaskVTs[]
static bool getUniqueInsertion(SDNode *N, unsigned &UniqueIdx)
static SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const VETargetLowering &TLI, const VESubtarget *Subtarget)
static const MVT AllVectorVTs[]
static const MVT AllPackedVTs[]
static SDValue finalizeTS1AM(SDValue Op, SelectionDAG &DAG, SDValue Data, SDValue Bits)
static SDValue lowerStoreF128(SDValue Op, SelectionDAG &DAG)
static SDValue lowerStoreI1(SDValue Op, SelectionDAG &DAG)
#define TARGET_NODE_CASE(NAME)
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:696
BinOp getOperation() const
Definition: Instructions.h:787
This is an SDNode representing atomic operations.
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
CCState - This class holds information needed while lowering arguments and return values.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
bool needsCustom() const
bool isMemLoc() const
bool isExtInLoc() const
int64_t getLocMemOffset() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
unsigned size() const
Definition: DenseMap.h:99
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
FenceInst * CreateFence(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, const Twine &Name="")
Definition: IRBuilder.h:1851
bool hasAtomicStore() const LLVM_READONLY
Return true if this atomic instruction stores to memory.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition: MCExpr.h:617
Context object for machine code objects.
Definition: MCContext.h:83
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:213
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:393
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Machine Value Type.
SimpleValueType SimpleTy
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
static auto integer_valuetypes()
static auto vector_valuetypes()
MVT getVectorElementType() const
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
bool isEHPad() const
Returns true if the block is a landing pad.
MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
succ_reverse_iterator succ_rbegin()
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
succ_reverse_iterator succ_rend()
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setFrameAddressIsTaken(bool T)
void setReturnAddressIsTaken(bool s)
int getFunctionContextIndex() const
Return the index for the function context object.
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist,...
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
bool hasCallSiteLandingPad(MCSymbol *Sym)
Return true if the landing pad Eh symbol has an associated call site.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
SmallVectorImpl< unsigned > & getCallSiteLandingPad(MCSymbol *Sym)
Get the call site indexes for a landing pad EH symbol.
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned createJumpTableIndex(const std::vector< MachineBasicBlock * > &DestBBs)
createJumpTableIndex - Create a new jump table.
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const MCPhysReg * getCalleeSavedRegs() const
Returns list of callee saved registers.
Align getAlign() const
bool isVolatile() const
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:226
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:733
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:743
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:487
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
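getStore and getLoad chain through the token values they return, which is how lowering code keeps memory operations ordered. A small sketch that spills a value to a stack slot and reloads it, assuming DAG, DL, Chain, Val, FI, PtrVT, and MF come from the surrounding lowering code:
SDValue Ptr = DAG.getFrameIndex(FI, PtrVT);
SDValue Store = DAG.getStore(Chain, DL, Val, Ptr,
                             MachinePointerInfo::getFixedStack(MF, FI),
                             Align(8));
// Chaining the load on the store's token guarantees it sees the stored value.
SDValue Load = DAG.getLoad(MVT::f64, DL, Store, Ptr,
                           MachinePointerInfo::getFixedStack(MF, FI));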
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside CALLSEQ_START..CALLSEQ_END pair.
SDValue getRegister(unsigned Reg, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:488
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:784
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
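getNode either creates the requested node or returns an existing identical one, since the SelectionDAG CSEs nodes. For example, an i64 addition of two SDValues already in scope:
SDValue Sum = DAG.getNode(ISD::ADD, DL, MVT::i64, LHS, RHS);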
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:687
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:779
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:482
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:810
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
Definition: SelectionDAG.h:500
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:750
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:570
void reserve(size_type NumEntries)
Definition: SmallPtrSet.h:114
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:368
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
Definition: SmallPtrSet.h:503
bool empty() const
Definition: SmallVector.h:95
size_t size() const
Definition: SmallVector.h:92
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:587
void push_back(const T &Elt)
Definition: SmallVector.h:427
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1210
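A brief sketch of how the two small containers above behave; V is assumed to be an SDValue from surrounding DAG code:
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
llvm::SmallVector<llvm::SDValue, 8> Ops; // inline storage for 8 elements
Ops.push_back(V);                        // heap allocation only past 8 elements
llvm::SmallPtrSet<llvm::SDNode *, 16> Visited;
bool IsNew = Visited.insert(V.getNode()).second; // false if already present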
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
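StringSwitch's Case/Default chain is the usual pattern for decoding names such as the RegName parameter of getRegisterByName below. A sketch with purely illustrative values (RegName is assumed to be a StringRef; the numbers are not VE's real register table):
#include "llvm/ADT/StringSwitch.h"
unsigned RegNum = llvm::StringSwitch<unsigned>(RegName)
                      .Case("sp", 11) // illustrative mapping only
                      .Case("fp", 9)
                      .Default(0);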
Information about stack frame layout on the target.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used to make them valid.
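These two pieces work together: the target constructor records a LegalizeAction per (opcode, type) pair, and the legalizer later expands the node, promotes its type, or hands it to LowerOperation. A hedged sketch; the opcode/type choices here are illustrative, not a quote of the VE configuration:
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); // routed to LowerOperation
setOperationAction(ISD::BR_CC, MVT::f128, Expand);    // rewritten in terms of legal nodes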
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
virtual unsigned getMinimumJumpTableEntries() const
Return the lower limit for the number of blocks in a jump table.
const TargetMachine & getTargetMachine() const
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
void setSupportsUnalignedAtomics(bool UnalignedSupported)
Sets whether unaligned atomic operations are supported.
virtual bool isJumpTableRelative() const
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
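The registration pairs with the PerformDAGCombine override listed further down, which dispatches to combineSelect, combineSelectCC, and combineTRUNCATE. A sketch of the registering side, using the ArrayRef overload shown above:
setTargetDAGCombine({ISD::SELECT, ISD::SELECT_CC, ISD::TRUNCATE});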
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
std::vector< ArgListEntry > ArgListTy
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getVoidTy(LLVMContext &C)
Value * getOperand(unsigned i) const
Definition: User.h:169
SDValue getBroadcast(EVT ResultVT, SDValue Scalar, SDValue AVL) const
SDValue getNode(unsigned OC, SDVTList VTL, ArrayRef< SDValue > OpV, std::optional< SDNodeFlags > Flags=std::nullopt) const
getNode {
Definition: VECustomDAG.h:156
SDValue getUNDEF(EVT VT) const
Definition: VECustomDAG.h:180
SDValue getConstant(uint64_t Val, EVT VT, bool IsTarget=false, bool IsOpaque=false) const
bool hasBP(const MachineFunction &MF) const
Register getGlobalBaseReg(MachineFunction *MF) const
} Optimization
@ VK_VE_GOTOFF_HI32
Definition: VEMCExpr.h:34
@ VK_VE_GOTOFF_LO32
Definition: VEMCExpr.h:35
bool enableVPU() const
Definition: VESubtarget.h:65
unsigned getRsaSize() const
Get the size of RSA, return address, and frame pointer as described in VEFrameLowering....
Definition: VESubtarget.h:79
const VEInstrInfo * getInstrInfo() const override
Definition: VESubtarget.h:51
const VEFrameLowering * getFrameLowering() const override
Definition: VESubtarget.h:52
const VERegisterInfo * getRegisterInfo() const override
Definition: VESubtarget.h:55
SDValue splitMaskArithmetic(SDValue Op, SelectionDAG &DAG) const
SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
SDValue lowerToVVP(SDValue Op, SelectionDAG &DAG) const
} Custom Inserter
SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
SDValue lowerVAARG(SDValue Op, SelectionDAG &DAG) const
SDValue combineSelect(SDNode *N, DAGCombinerInfo &DCI) const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
VETargetLowering(const TargetMachine &TM, const VESubtarget &STI)
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Custom Lower {.
SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &ArgsFlags, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fit into the return registers.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.
SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
} VVPLowering
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
SDValue combineSelectCC(SDNode *N, DAGCombinerInfo &DCI) const
SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const
unsigned getMinimumJumpTableEntries() const override
} Inline Assembly
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array, into the specified DAG.
MachineBasicBlock * emitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *BB) const
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Register prepareMBB(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineBasicBlock *TargetBB, const DebugLoc &DL) const
void setupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB, MachineBasicBlock *DispatchBB, int FI, int Offset) const
SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
Custom Inserter {.
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align A, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Returns true if the target allows unaligned memory accesses of the specified type.
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const
SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const
TargetLoweringBase::LegalizeAction getCustomOperationAction(SDNode &) const override
Custom Lower {.
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const
SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const
SDValue legalizeInternalVectorOp(SDValue Op, SelectionDAG &DAG) const
Register prepareSymbol(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, StringRef Symbol, const DebugLoc &DL, bool IsLocal, bool IsCall) const
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array, into the specified DAG.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
} Custom Lower
SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const
} Custom DAGCombine
SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const
const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned Uid, MCContext &Ctx) const override
SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const
unsigned getJumpTableEncoding() const override
JumpTable for VE.
SDValue lowerATOMIC_SWAP(SDValue Op, SelectionDAG &DAG) const
SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF, SelectionDAG &DAG) const
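makeHiLoPair materializes a 64-bit symbolic address from two 32-bit relocations. A hedged sketch of the general shape, assuming VEISD::Hi/VEISD::Lo wrapper nodes and the withTargetFlags helper listed nearby; this is not a quote of the real implementation:
SDValue Hi = DAG.getNode(VEISD::Hi, DL, PtrVT, withTargetFlags(Op, HiTF, DAG));
SDValue Lo = DAG.getNode(VEISD::Lo, DL, PtrVT, withTargetFlags(Op, LoTF, DAG));
SDValue Addr = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);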
ConstraintType getConstraintType(StringRef Constraint) const override
Inline Assembly {.
SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
SDValue lowerToTLSGeneralDynamicModel(SDValue Op, SelectionDAG &DAG) const
LLVM Value Representation.
Definition: Value.h:74
iterator_range< use_iterator > uses()
Definition: Value.h:376
self_iterator getIterator()
Definition: ilist_node.h:132
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
Definition: CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition: ISDOpcodes.h:40
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:779
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
Definition: ISDOpcodes.h:1194
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1190
@ CTLZ_ZERO_UNDEF
Definition: ISDOpcodes.h:752
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand that prevents memory accesses to the masked-off lanes.
Definition: ISDOpcodes.h:1355
@ VECREDUCE_SMIN
Definition: ISDOpcodes.h:1440
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
Definition: ISDOpcodes.h:153
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
Definition: ISDOpcodes.h:257
@ ATOMIC_LOAD_NAND
Definition: ISDOpcodes.h:1337
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:743
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1223
@ ConstantFP
Definition: ISDOpcodes.h:77
@ ATOMIC_LOAD_MAX
Definition: ISDOpcodes.h:1339
@ ATOMIC_LOAD_UMIN
Definition: ISDOpcodes.h:1340
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction.
Definition: ISDOpcodes.h:1099
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:813
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:497
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
Definition: ISDOpcodes.h:157
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b. this is still a strong cmpxchg operation, so Success == (Val == cmp).
Definition: ISDOpcodes.h:1322
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
Definition: ISDOpcodes.h:840
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
Definition: ISDOpcodes.h:1296
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1301
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:262
@ VECREDUCE_SMAX
Definition: ISDOpcodes.h:1439
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-precision (16 bit) floating point values.
Definition: ISDOpcodes.h:963
@ ATOMIC_LOAD_OR
Definition: ISDOpcodes.h:1335
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
Definition: ISDOpcodes.h:953
@ ATOMIC_LOAD_XOR
Definition: ISDOpcodes.h:1336
@ GlobalTLSAddress
Definition: ISDOpcodes.h:79
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:804
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:980
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1145
@ ATOMIC_LOAD_MIN
Definition: ISDOpcodes.h:1338
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1124
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:756
@ VECREDUCE_UMAX
Definition: ISDOpcodes.h:1441
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Definition: ISDOpcodes.h:1219
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defined outside of the scope of this SelectionDAG.
Definition: ISDOpcodes.h:215
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
Definition: ISDOpcodes.h:1434
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN containing the high bits of the result.
Definition: ISDOpcodes.h:673
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:734
@ ATOMIC_LOAD_CLR
Definition: ISDOpcodes.h:1334
@ ATOMIC_LOAD_AND
Definition: ISDOpcodes.h:1333
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
Definition: ISDOpcodes.h:549
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition: ISDOpcodes.h:209
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:810
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing the lhs and rhs (ops #0 and #1) of a conditional expression with the condition code in op #4, a CondCodeSDNode.
Definition: ISDOpcodes.h:771
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi, swapLo, swapHi).
Definition: ISDOpcodes.h:1316
@ ATOMIC_LOAD_UMAX
Definition: ISDOpcodes.h:1341
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:1028
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1109
@ ConstantPool
Definition: ISDOpcodes.h:82
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition: ISDOpcodes.h:696
@ VECREDUCE_UMIN
Definition: ISDOpcodes.h:1442
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:100
@ ATOMIC_LOAD_ADD
Definition: ISDOpcodes.h:1331
@ ATOMIC_LOAD_SUB
Definition: ISDOpcodes.h:1332
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:708
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
Definition: ISDOpcodes.h:190
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:538
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt).
Definition: ISDOpcodes.h:1330
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.setjmp intrinsic.
Definition: ISDOpcodes.h:147
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:816
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1214
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1138
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:793
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
Definition: ISDOpcodes.h:61
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:507
@ AssertZext
Definition: ISDOpcodes.h:62
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable elements.
Definition: ISDOpcodes.h:529
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1603
bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
@ Dead
Unused definition.
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:57
CondCode
Definition: VE.h:42
@ CC_ILE
Definition: VE.h:49
@ EH_SJLJ_SETUP_DISPATCH
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
static uint64_t getFpImmVal(const ConstantFPSDNode *N)
getFpImmVal - get immediate representation of floating point value
bool isPackedVectorType(EVT SomeVT)
Definition: VECustomDAG.cpp:22
@ Offset
Definition: DWP.cpp:480
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
static bool isIntVECondCode(VECC::CondCode CC)
Definition: VE.h:150
@ SjLj
setjmp/longjmp based exceptions
static uint64_t getImmVal(const ConstantSDNode *N)
getImmVal - get immediate representation of integer value
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the stack frame of the current function.
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
bool isMaskArithmetic(SDValue Op)
Definition: VECustomDAG.cpp:50
static VECC::CondCode fpCondCode2Fcc(ISD::CondCode CC)
Convert a DAG floating point condition code to a VE FCC condition.
bool isMaskType(EVT SomeVT)
Definition: VECustomDAG.cpp:44
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
AtomicOrdering
Atomic ordering for LLVM's memory model.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
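A CCAssignFn is not called directly; it is handed to a CCState, which invokes it once per value. A sketch of the typical use inside LowerFormalArguments, assuming Ins, CallConv, IsVarArg, MF, and DAG are in scope:
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, getParamCC(CallConv, /*IsVarArg=*/false));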
bool isVVPOrVEC(unsigned Opcode)
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
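For example, rounding a 13-byte object up to an 8-byte stack alignment:
uint64_t Padded = alignTo(13, Align(8)); // yields 16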
bool isPackingSupportOpcode(unsigned Opc)
std::pair< SDValue, bool > getAnnotatedNodeAVL(SDValue Op)
DWARFExpression::Operation Op
unsigned M0(unsigned Val)
Definition: VE.h:375
static VECC::CondCode intCondCode2Icc(ISD::CondCode CC)
Convert a DAG integer condition code to a VE ICC condition.
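A hedged sketch of the shape such a mapping takes; only CC_ILE is confirmed by this listing, and CC_IEQ is an enumerator name assumed from VE.h:
switch (CC) {
case ISD::SETLE: return VECC::CC_ILE; // signed less-or-equal
case ISD::SETEQ: return VECC::CC_IEQ; // assumed enumerator name
default: llvm_unreachable("unhandled integer condition code");
}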
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
static bool isMImmVal(uint64_t Val)
Definition: VE.h:331
static bool isMImm32Val(uint32_t Val)
Definition: VE.h:344
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:147
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:359
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:307
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:152
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
const uint32_t * getNoPreservedMask() const override
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID CC) const override