LLVM 20.0.0git
VEISelLowering.cpp
Go to the documentation of this file.
1//===-- VEISelLowering.cpp - VE DAG Lowering Implementation ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interfaces that VE uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "VEISelLowering.h"
16#include "VECustomDAG.h"
17#include "VEInstrBuilder.h"
19#include "VERegisterInfo.h"
20#include "VETargetMachine.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/IRBuilder.h"
34#include "llvm/IR/Module.h"
37using namespace llvm;
38
39#define DEBUG_TYPE "ve-lower"
40
41//===----------------------------------------------------------------------===//
42// Calling Convention Implementation
43//===----------------------------------------------------------------------===//
44
45#include "VEGenCallingConv.inc"
46
  // Select the CCAssignFn used to assign return values for the given calling
  // convention: the fast CC gets RetCC_VE_Fast, everything else RetCC_VE_C.
  // NOTE(review): the extraction dropped this function's signature line and
  // the case label in front of the RetCC_VE_Fast return (presumably
  // `case CallingConv::Fast:`) — verify against the upstream file.
  switch (CallConv) {
  default:
    return RetCC_VE_C;
    return RetCC_VE_Fast;
  }
}
55
/// Select the CCAssignFn used to assign call/formal arguments.
/// Varargs calls always use CC_VE2 (arguments go to both registers and
/// stack); otherwise the fast CC gets CC_VE_Fast and everything else CC_VE_C.
/// NOTE(review): the extraction appears to have dropped the case label
/// (presumably `case CallingConv::Fast:`) before the CC_VE_Fast return —
/// verify against the upstream file.
CCAssignFn *getParamCC(CallingConv::ID CallConv, bool IsVarArg) {
  if (IsVarArg)
    return CC_VE2;
  switch (CallConv) {
  default:
    return CC_VE_C;
    return CC_VE_Fast;
  }
}
66
68 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
69 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
70 CCAssignFn *RetCC = getReturnCC(CallConv);
72 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
73 return CCInfo.CheckReturn(Outs, RetCC);
74}
75
// All vector value types given a register class in initRegisterClasses()
// (512-element types are the "packed" forms of the 256-element ones).
static const MVT AllVectorVTs[] = {MVT::v256i32, MVT::v512i32, MVT::v256i64,
                                   MVT::v256f32, MVT::v512f32, MVT::v256f64};

// Vector-mask value types (one- and two-register masks).
static const MVT AllMaskVTs[] = {MVT::v256i1, MVT::v512i1};

// Vector types that use the packed (2 x 32-bit per element slot) layout.
static const MVT AllPackedVTs[] = {MVT::v512i32, MVT::v512f32};
82
83void VETargetLowering::initRegisterClasses() {
84 // Set up the register classes.
85 addRegisterClass(MVT::i32, &VE::I32RegClass);
86 addRegisterClass(MVT::i64, &VE::I64RegClass);
87 addRegisterClass(MVT::f32, &VE::F32RegClass);
88 addRegisterClass(MVT::f64, &VE::I64RegClass);
89 addRegisterClass(MVT::f128, &VE::F128RegClass);
90
91 if (Subtarget->enableVPU()) {
92 for (MVT VecVT : AllVectorVTs)
93 addRegisterClass(VecVT, &VE::V64RegClass);
94 addRegisterClass(MVT::v256i1, &VE::VMRegClass);
95 addRegisterClass(MVT::v512i1, &VE::VM512RegClass);
96 }
97}
98
// Configure legalization actions for scalar (SPU) operations.
// NOTE(review): the extraction dropped many setOperationAction /
// setLoadExtAction calls throughout this function (visible as empty
// loop bodies and dangling comments) — verify against the upstream file.
void VETargetLowering::initSPUActions() {
  const auto &TM = getTargetMachine();
  /// Load & Store {

  // VE doesn't have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes()) {
    // (extraction gap: the setLoadExtAction calls for i1 are missing here)
    setTruncStoreAction(VT, MVT::i1, Expand);
  }

  // VE doesn't have floating point extload/truncstore, so expand them.
  for (MVT FPVT : MVT::fp_valuetypes()) {
    for (MVT OtherFPVT : MVT::fp_valuetypes()) {
      setLoadExtAction(ISD::EXTLOAD, FPVT, OtherFPVT, Expand);
      setTruncStoreAction(FPVT, OtherFPVT, Expand);
    }
  }

  // VE doesn't have fp128 load/store, so expand them in custom lower.
  // (extraction gap: the f128 LOAD/STORE Custom actions are missing here)

  /// } Load & Store

  // Custom legalize address nodes into LO/HI parts.
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
  // (extraction gap: the Custom actions for address nodes using PtrVT are
  // missing here)

  /// VAARG handling {
  // VAARG needs to be lowered to access with 8 bytes alignment.
  // Use the default implementation.
  /// } VAARG handling

  /// Stack {

  // Use the default implementation.
  /// } Stack

  /// Branch {

  // VE doesn't have BRCOND

  // BR_JT is not implemented yet.

  /// } Branch

  /// Int Ops {
  for (MVT IntVT : {MVT::i32, MVT::i64}) {
    // VE has no REM or DIVREM operations.

    // VE has no SHL_PARTS/SRA_PARTS/SRL_PARTS operations.

    // VE has no MULHU/S or U/SMUL_LOHI operations.
    // TODO: Use MPD instruction to implement SMUL_LOHI for i32 type.

    // VE has no CTTZ, ROTL, ROTR operations.

    // VE has 64 bits instruction which works as i64 BSWAP operation. This
    // instruction works fine as i32 BSWAP operation with an additional
    // parameter. Use isel patterns to lower BSWAP.

    // VE has only 64 bits instructions which work as i64 BITREVERSE/CTLZ/CTPOP
    // operations. Use isel patterns for i64, promote for i32.
    LegalizeAction Act = (IntVT == MVT::i32) ? Promote : Legal;
    setOperationAction(ISD::CTLZ, IntVT, Act);
    setOperationAction(ISD::CTPOP, IntVT, Act);

    // VE has only 64 bits instructions which work as i64 AND/OR/XOR operations.
    // Use isel patterns for i64, promote for i32.
    setOperationAction(ISD::AND, IntVT, Act);
    setOperationAction(ISD::OR, IntVT, Act);
    setOperationAction(ISD::XOR, IntVT, Act);

    // Legal smax and smin
  }
  /// } Int Ops

  /// Conversion {
  // VE doesn't have instructions for fp<->uint, so expand them by llvm
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote); // use i64
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote); // use i64

  // fp16 not supported
  for (MVT FPVT : MVT::fp_valuetypes()) {
    // (extraction gap: the f16 conversion actions are missing here)
  }
  /// } Conversion

  /// Floating-point Ops {
  /// Note: Floating-point operations are fneg, fadd, fsub, fmul, fdiv, frem,
  /// and fcmp.

  // VE doesn't have following floating point operations.
  for (MVT VT : MVT::fp_valuetypes()) {
    // (extraction gap: the Expand actions are missing here)
  }

  // VE doesn't have fdiv of f128.

  for (MVT FPVT : {MVT::f32, MVT::f64}) {
    // f32 and f64 uses ConstantFP. f128 uses ConstantPool.
  }
  /// } Floating-point Ops

  /// Floating-point math functions {

  // VE doesn't have the following floating point math functions.
  for (MVT VT : MVT::fp_valuetypes()) {
    // (extraction gap: the Expand actions are missing here)
  }

  // VE has single and double FMINNUM and FMAXNUM
  for (MVT VT : {MVT::f32, MVT::f64}) {
    // (extraction gap: the FMINNUM/FMAXNUM Legal actions are missing here)
  }

  /// } Floating-point math functions

  /// Atomic instructions {

  // Use custom inserter for ATOMIC_FENCE.

  // Other atomic instructions.
  for (MVT VT : MVT::integer_valuetypes()) {
    // Support i8/i16 atomic swap.

    // FIXME: Support "atmam" instructions.

    // VE doesn't have the following instructions.
  }

  /// } Atomic instructions

  /// SJLJ instructions {
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  /// } SJLJ instructions

  // Intrinsic instructions
}
308
// Configure legalization actions for vector (VPU) operations: VP/VVP custom
// lowering for legal vector types, custom mask ops, and custom handling of
// memory/reduction ops for 32/64-bit-element vector types.
// NOTE(review): the extraction dropped several lines here (the mask-VT loop
// body, packed-VT loop body, and the reduction-opcode initializer list) —
// verify against the upstream file.
void VETargetLowering::initVPUActions() {
  for (MVT LegalMaskVT : AllMaskVTs)
    // (extraction gap: the per-mask-VT action call is missing here)

  for (unsigned Opc : {ISD::AND, ISD::OR, ISD::XOR})
    setOperationAction(Opc, MVT::v512i1, Custom);

  for (MVT LegalVecVT : AllVectorVTs) {
    // Translate all vector instructions with legal element types to VVP_*
    // nodes.
    // TODO We will custom-widen into VVP_* nodes in the future. While we are
    // building the infrastructure for this, we only do this for legal vector
    // VTs.
#define HANDLE_VP_TO_VVP(VP_OPC, VVP_NAME)                                     \
  setOperationAction(ISD::VP_OPC, LegalVecVT, Custom);
#define ADD_VVP_OP(VVP_NAME, ISD_NAME)                                         \
  setOperationAction(ISD::ISD_NAME, LegalVecVT, Custom);
    setOperationAction(ISD::EXPERIMENTAL_VP_STRIDED_LOAD, LegalVecVT, Custom);
    setOperationAction(ISD::EXPERIMENTAL_VP_STRIDED_STORE, LegalVecVT, Custom);
#include "VVPNodes.def"
  }

  for (MVT LegalPackedVT : AllPackedVTs) {
    // (extraction gap: the packed-VT action calls are missing here)
  }

  // vNt32, vNt64 ops (legal element types)
  for (MVT VT : MVT::vector_valuetypes()) {
    MVT ElemVT = VT.getVectorElementType();
    unsigned ElemBits = ElemVT.getScalarSizeInBits();
    if (ElemBits != 32 && ElemBits != 64)
      continue;

    for (unsigned MemOpc : {ISD::MLOAD, ISD::MSTORE, ISD::LOAD, ISD::STORE})
      setOperationAction(MemOpc, VT, Custom);

    const ISD::NodeType IntReductionOCs[] = {
        // (extraction gap: the VECREDUCE_* opcode list is missing here)

    for (unsigned IntRedOpc : IntReductionOCs)
      setOperationAction(IntRedOpc, VT, Custom);
  }

  // v256i1 and v512i1 ops
  for (MVT MaskVT : AllMaskVTs) {
    // Custom lower mask ops
  }
}
365
                               // Lowers the return of a function: extends or
                               // bitcasts each return value per the CC, copies
                               // it to its return register, and emits a
                               // VEISD::RET_GLUE node glued to those copies.
                               // NOTE(review): the extraction dropped the
                               // signature opener, the `Outs` parameter line,
                               // the `RVLocs` declaration, and the non-BCvt
                               // case labels in the switch below — verify
                               // against the upstream file.
                               bool IsVarArg,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, getReturnCC(CallConv));

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    assert(!VA.needsCustom() && "Unexpected custom lowering");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
      // (extraction gap: the Full/SExt/ZExt/AExt case labels are missing)
      break;
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::BCvt: {
      // Convert a float return value to i64 with padding.
      //     63     31   0
      //    +------+------+
      //    | float|   0  |
      //    +------+------+
      assert(VA.getLocVT() == MVT::i64);
      assert(VA.getValVT() == MVT::f32);
      SDValue Undef = SDValue(
          DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i64), 0);
      SDValue Sub_f32 = DAG.getTargetConstant(VE::sub_f32, DL, MVT::i32);
      OutVal = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                          MVT::i64, Undef, OutVal, Sub_f32),
                       0);
      break;
    }
    default:
      llvm_unreachable("Unknown loc info!");
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(VEISD::RET_GLUE, DL, MVT::Other, RetOps);
}
440
    // Lowers incoming formal arguments: register args become live-in copies
    // (with Assert?ext / subreg extraction per the CC), stack args become
    // fixed-object loads, and the varargs frame offset is recorded.
    // NOTE(review): the extraction dropped the signature opener, the
    // `MF`/`ArgLocs`/`FuncInfo` declarations, the SExt/ZExt case labels, and
    // the tail of the DAG.getLoad call — verify against the upstream file.
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Get the base offset of the incoming arguments stack space.
  unsigned ArgsBaseOffset = Subtarget->getRsaSize();
  // Get the size of the preserved arguments area
  unsigned ArgsPreserved = 64;

  // Analyze arguments according to CC_VE.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  // Allocate the preserved area first.
  CCInfo.AllocateStack(ArgsPreserved, Align(8));
  // We already allocated the preserved area, so the stack offset computed
  // by CC_VE would be correct now.
  CCInfo.AnalyzeFormalArguments(Ins, getParamCC(CallConv, false));

  for (const CCValAssign &VA : ArgLocs) {
    assert(!VA.needsCustom() && "Unexpected custom lowering");
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      Register VReg =
          MF.addLiveIn(VA.getLocReg(), getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
        // (extraction gap: the SExt/ZExt case labels are missing here)
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::BCvt: {
        // Extract a float argument from i64 with padding.
        //     63     31   0
        //    +------+------+
        //    | float|   0  |
        //    +------+------+
        assert(VA.getLocVT() == MVT::i64);
        assert(VA.getValVT() == MVT::f32);
        SDValue Sub_f32 = DAG.getTargetConstant(VE::sub_f32, DL, MVT::i32);
        Arg = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                         MVT::f32, Arg, Sub_f32),
                      0);
        break;
      }
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_VE_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp + the size of reserved area.
    unsigned Offset = VA.getLocMemOffset() + ArgsBaseOffset;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;

    // Adjust offset for a float argument by adding 4 since the argument is
    // stored in 8 bytes buffer with offset like below. LLVM generates
    // 4 bytes load instruction, so need to adjust offset here. This
    // adjustment is required in only LowerFormalArguments. In LowerCall,
    // a float argument is converted to i64 first, and stored as 8 bytes
    // data, which is required by ABI, so no need for adjustment.
    //    0      4
    //    +------+------+
    //    | empty| float|
    //    +------+------+
    if (VA.getValVT() == MVT::f32)
      Offset += 4;

    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
        // (extraction gap: the frame-index/pointer-info operands of this
        // getLoad call are missing here)
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %s0-%s8.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  // TODO: need to calculate offset correctly once we support f128.
  unsigned ArgOffset = ArgLocs.size() * 8;
  // Skip the reserved area at the top of stack.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgsBaseOffset);

  return Chain;
}
553
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
// Maps a named-register string (as used by llvm.read_register etc.) to the
// corresponding physical VE register; aborts on an unknown name.
// NOTE(review): the extraction dropped the signature opener and the
// `StringSwitch` head that initializes `Reg` — verify against upstream.
                                           const MachineFunction &MF) const {
      .Case("sp", VE::SX11)    // Stack pointer
      .Case("fp", VE::SX9)     // Frame pointer
      .Case("sl", VE::SX8)     // Stack limit
      .Case("lr", VE::SX10)    // Link register
      .Case("tp", VE::SX14)    // Thread pointer
      .Case("outer", VE::SX12) // Outer register
      .Case("info", VE::SX17)  // Info area register
      .Case("got", VE::SX15)   // Global offset table register
      .Case("plt", VE::SX16)   // Procedure linkage table register
      .Default(0);

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}
575
576//===----------------------------------------------------------------------===//
577// TargetLowering Implementation
578//===----------------------------------------------------------------------===//
579
    // Lowers a call: assigns argument locations (twice for varargs, which use
    // both registers and stack), materializes the callee address into %s12,
    // emits argument copies/stores, the VEISD::CALL node, the CALLSEQ
    // bracketing, and finally extracts the return values.
    // NOTE(review): the extraction dropped the signature opener, the
    // SmallVector declarations for ArgLocs/ArgLocs2/RegsToPass/Ops/RVLocs,
    // the non-PIC `makeAddress`-style callee materializations, and several
    // case labels — verify against the upstream file.
    SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // VE target does not yet support tail call optimization.
  CLI.IsTailCall = false;

  // Get the base offset of the outgoing arguments stack space.
  unsigned ArgsBaseOffset = Subtarget->getRsaSize();
  // Get the size of the preserved arguments area
  unsigned ArgsPreserved = 8 * 8u;

  // Analyze operands of the call, assigning locations to each operand.
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  // Allocate the preserved area first.
  CCInfo.AllocateStack(ArgsPreserved, Align(8));
  // We already allocated the preserved area, so the stack offset computed
  // by CC_VE would be correct now.
  CCInfo.AnalyzeCallOperands(CLI.Outs, getParamCC(CLI.CallConv, false));

  // VE requires to use both register and stack for varargs or no-prototyped
  // functions.
  bool UseBoth = CLI.IsVarArg;

  // Analyze operands again if it is required to store BOTH.
  CCState CCInfo2(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
                  ArgLocs2, *DAG.getContext());
  if (UseBoth)
    CCInfo2.AnalyzeCallOperands(CLI.Outs, getParamCC(CLI.CallConv, true));

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getStackSize();

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  // VE needs to get address of callee function in a register
  // So, prepare to copy it to SX12 here.

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;

  bool IsPICCall = isPositionIndependent();

  // PC-relative references to external symbols should go through $stub.
  // If so, we need to prepare GlobalBaseReg first.
  const TargetMachine &TM = DAG.getTarget();
  const GlobalValue *GV = nullptr;
  auto *CalleeG = dyn_cast<GlobalAddressSDNode>(Callee);
  if (CalleeG)
    GV = CalleeG->getGlobal();
  bool Local = TM.shouldAssumeDSOLocal(GV);
  bool UsePlt = !Local;

  // Turn GlobalAddress/ExternalSymbol node into a value node
  // containing the address of them here.
  if (CalleeG) {
    if (IsPICCall) {
      if (UsePlt)
        Subtarget->getInstrInfo()->getGlobalBaseReg(&MF);
      Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
      Callee = DAG.getNode(VEISD::GETFUNPLT, DL, PtrVT, Callee);
    } else {
      Callee =
          // (extraction gap: the non-PIC callee materialization is missing)
    }
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    if (IsPICCall) {
      if (UsePlt)
        Subtarget->getInstrInfo()->getGlobalBaseReg(&MF);
      Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
      Callee = DAG.getNode(VEISD::GETFUNPLT, DL, PtrVT, Callee);
    } else {
      Callee =
          // (extraction gap: the non-PIC callee materialization is missing)
    }
  }

  RegsToPass.push_back(std::make_pair(VE::SX12, Callee));

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
      // (extraction gap: the Full/SExt/ZExt/AExt case labels are missing)
      break;
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt: {
      // Convert a float argument to i64 with padding.
      //     63     31   0
      //    +------+------+
      //    | float|   0  |
      //    +------+------+
      assert(VA.getLocVT() == MVT::i64);
      assert(VA.getValVT() == MVT::f32);
      SDValue Undef = SDValue(
          DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i64), 0);
      SDValue Sub_f32 = DAG.getTargetConstant(VE::sub_f32, DL, MVT::i32);
      Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                       MVT::i64, Undef, Arg, Sub_f32),
                    0);
      break;
    }
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      if (!UseBoth)
        continue;
      VA = ArgLocs2[i];
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(VE::SX11, PtrVT);
    // The argument area starts at %fp/%sp + the size of reserved area.
    SDValue PtrOff =
        DAG.getIntPtrConstant(VA.getLocMemOffset() + ArgsBaseOffset, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
                             RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // Build the operands for the call instruction itself.
  Ops.push_back(Chain);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const VERegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CLI.CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(VEISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments.

  // Assign locations to each value returned by this call.
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, getReturnCC(CLI.CallConv));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(!VA.needsCustom() && "Unexpected custom lowering");
    Register Reg = VA.getLocReg();

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
      // (extraction gap: the SExt/ZExt case labels are missing here)
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::BCvt: {
      // Extract a float return value from i64 with padding.
      //     63     31   0
      //    +------+------+
      //    | float|   0  |
      //    +------+------+
      assert(VA.getLocVT() == MVT::i64);
      assert(VA.getValVT() == MVT::f32);
      SDValue Sub_f32 = DAG.getTargetConstant(VE::sub_f32, DL, MVT::i32);
      RV = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                      MVT::f32, RV, Sub_f32),
                   0);
      break;
    }
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}
857
                                         // Always disables global+offset
                                         // folding (see rationale below).
                                         // NOTE(review): the extraction
                                         // dropped the signature opener —
                                         // verify against upstream.
                                         const GlobalAddressSDNode *GA) const {
  // VE uses 64 bit addressing, so we need multiple instructions to generate
  // an address. Folding address with offset increases the number of
  // instructions, so that we disable it here. Offsets will be folded in
  // the DAG combine later if it worth to do so.
  return false;
}
866
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
/// On VE this is exactly the f32/f64 types; the immediate's value is not
/// inspected.
/// NOTE(review): the extraction dropped the signature opener — verify
/// against upstream.
                                    bool ForCodeSize) const {
  return VT == MVT::f32 || VT == MVT::f64;
}
874
/// Determine if the target supports unaligned memory accesses.
///
/// This function returns true if the target allows unaligned memory accesses
/// of the specified type in the given address space. If true, it also returns
/// whether the unaligned memory access is "fast" in the last argument by
/// reference. This is used, for example, in situations where an array
/// copy/move/set is converted to a sequence of store operations. Its use
/// helps to ensure that such replacements don't generate code that causes an
/// alignment error (trap) on the target machine.
/// VE unconditionally allows (and reports as fast) unaligned accesses.
/// NOTE(review): the extraction dropped the signature opener and the
/// MachineMemOperand::Flags parameter line — verify against upstream.
                                                      unsigned AddrSpace,
                                                      Align A,
                                                      unsigned *Fast) const {
  if (Fast) {
    // It's fast anytime on VE
    *Fast = 1;
  }
  return true;
}
895
                                   // Constructor: wires up register classes
                                   // and all SPU/VPU legalization actions.
                                   // NOTE(review): the extraction dropped the
                                   // constructor's first signature line and
                                   // several setter calls (boolean contents,
                                   // DAG-combine registrations, alignment
                                   // settings, register-property computation)
                                   // — verify against upstream.
                                   const VESubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.

  initRegisterClasses();
  initSPUActions();
  initVPUActions();

  // We have target-specific dag combine patterns for the following nodes:

  // Set function alignment to 16 bytes

  // VE stores all argument by 8 bytes alignment

}
926
// Returns a printable name for each VE-specific SDNode opcode (used by DAG
// dumps); returns nullptr for opcodes outside the VEISD range.
// NOTE(review): the extraction dropped a couple of lines inside the switch
// (the FIRST_NUMBER case before the bare `break`, and presumably the
// Hi/Lo or MEMBARRIER entries after GLOBAL_BASE_REG) — verify upstream.
const char *VETargetLowering::getTargetNodeName(unsigned Opcode) const {
#define TARGET_NODE_CASE(NAME)                                                 \
  case VEISD::NAME:                                                            \
    return "VEISD::" #NAME;
  switch ((VEISD::NodeType)Opcode) {
    break;
    TARGET_NODE_CASE(CMPI)
    TARGET_NODE_CASE(CMPU)
    TARGET_NODE_CASE(CMPF)
    TARGET_NODE_CASE(CMPQ)
    TARGET_NODE_CASE(CMOV)
    TARGET_NODE_CASE(CALL)
    TARGET_NODE_CASE(EH_SJLJ_LONGJMP)
    TARGET_NODE_CASE(EH_SJLJ_SETJMP)
    TARGET_NODE_CASE(EH_SJLJ_SETUP_DISPATCH)
    TARGET_NODE_CASE(GETFUNPLT)
    TARGET_NODE_CASE(GETSTACKTOP)
    TARGET_NODE_CASE(GETTLSADDR)
    TARGET_NODE_CASE(GLOBAL_BASE_REG)
    TARGET_NODE_CASE(RET_GLUE)
    TARGET_NODE_CASE(TS1AM)
    TARGET_NODE_CASE(VEC_UNPACK_LO)
    TARGET_NODE_CASE(VEC_UNPACK_HI)
    TARGET_NODE_CASE(VEC_PACK)
    TARGET_NODE_CASE(VEC_BROADCAST)
    TARGET_NODE_CASE(REPL_I32)
    TARGET_NODE_CASE(REPL_F32)

    TARGET_NODE_CASE(LEGALAVL)

    // Register the VVP_* SDNodes.
#define ADD_VVP_OP(VVP_NAME, ...) TARGET_NODE_CASE(VVP_NAME)
#include "VVPNodes.def"
  }
#undef TARGET_NODE_CASE
  return nullptr;
}
967
                                         // SETCC results are always i32 on VE
                                         // regardless of the compared type.
                                         // NOTE(review): extraction dropped
                                         // the signature opener — verify
                                         // against upstream.
                                         EVT VT) const {
  return MVT::i32;
}
972
// Convert to a target node and set target flags.
// Rewraps a generic address-producing node (GlobalAddress, BlockAddress,
// ConstantPool, ExternalSymbol, JumpTable) as its Target* counterpart with
// the given target flag TF; aborts on any other node kind.
// NOTE(review): the extraction dropped the signature opener — verify
// against upstream.
                                          SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(GA),
                                      GA->getValueType(0), GA->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(), Op.getValueType(),
                                     0, TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
                                     CP->getAlign(), CP->getOffset(), TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(), ES->getValueType(0),
                                       TF);

  if (const JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op))
    return DAG.getTargetJumpTable(JT->getIndex(), JT->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}
997
998// Split Op into high and low parts according to HiTF and LoTF.
999// Return an ADD node combining the parts.
1000SDValue VETargetLowering::makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF,
1001 SelectionDAG &DAG) const {
1002 SDLoc DL(Op);
1003 EVT VT = Op.getValueType();
1004 SDValue Hi = DAG.getNode(VEISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
1005 SDValue Lo = DAG.getNode(VEISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
1006 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
1007}
1008
// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
// PIC code computes a GOT-relative address (plus a GOT load for non-local
// symbols); absolute code models use a plain hi/lo pair.
// NOTE(review): the extraction dropped the signature opener, the makeHiLoPair
// calls that define `HiLo`, the tail of the getLoad call, and the absolute
// case's return statement — verify against upstream.
  SDLoc DL(Op);
  EVT PtrVT = Op.getValueType();

  // Handle PIC mode first. VE needs a got load for every variable!
  if (isPositionIndependent()) {
    auto GlobalN = dyn_cast<GlobalAddressSDNode>(Op);

    if (isa<ConstantPoolSDNode>(Op) || isa<JumpTableSDNode>(Op) ||
        (GlobalN && GlobalN->getGlobal()->hasLocalLinkage())) {
      // Create following instructions for local linkage PIC code.
      //     lea %reg, label@gotoff_lo
      //     and %reg, %reg, (32)0
      //     lea.sl %reg, label@gotoff_hi(%reg, %got)
      SDValue GlobalBase = DAG.getNode(VEISD::GLOBAL_BASE_REG, DL, PtrVT);
      return DAG.getNode(ISD::ADD, DL, PtrVT, GlobalBase, HiLo);
    }
    // Create following instructions for not local linkage PIC code.
    //     lea %reg, label@got_lo
    //     and %reg, %reg, (32)0
    //     lea.sl %reg, label@got_hi(%reg)
    //     ld %reg, (%reg, %got)
    SDValue GlobalBase = DAG.getNode(VEISD::GLOBAL_BASE_REG, DL, PtrVT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, GlobalBase, HiLo);
    return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), AbsAddr,
    // (extraction gap: the pointer-info operand of this getLoad is missing)
  }

  // This is one of the absolute code models.
  switch (getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
  case CodeModel::Medium:
  case CodeModel::Large:
    // abs64.
    // (extraction gap: the return of the hi/lo pair is missing here)
  }
}
1054
1055/// Custom Lower {
1056
// The mappings for emitLeading/TrailingFence for VE is designed by following
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// Emits the fence required *before* an atomic instruction for the given
// ordering; returns nullptr when no leading fence is needed.
// NOTE(review): the extraction dropped the signature opener, the ordering
// case labels in the switch, and (apparently) the SequentiallyConsistent
// fence creation after the hasAtomicStore() check — verify upstream.
                                                Instruction *Inst,
                                                AtomicOrdering Ord) const {
  switch (Ord) {
    llvm_unreachable("Invalid fence: unordered/non-atomic");
    return nullptr; // Nothing to do
    return Builder.CreateFence(AtomicOrdering::Release);
    if (!Inst->hasAtomicStore())
      return nullptr; // Nothing to do
  }
  llvm_unreachable("Unknown fence ordering in emitLeadingFence");
}
1079
                                                 // Emits the fence required
                                                 // *after* an atomic
                                                 // instruction; nullptr when
                                                 // none is needed.
                                                 // NOTE(review): extraction
                                                 // dropped the signature
                                                 // opener and the case labels
                                                 // — verify upstream.
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  switch (Ord) {
    llvm_unreachable("Invalid fence: unordered/not-atomic");
    return nullptr; // Nothing to do
    return Builder.CreateFence(AtomicOrdering::Acquire);
  }
  llvm_unreachable("Unknown fence ordering in emitTrailingFence");
}
1098
// lowerATOMIC_FENCE - Lower ISD::ATOMIC_FENCE. Operand 1 holds the atomic
// ordering, operand 2 the synchronization scope (see the static_casts below).
// Cross-thread (SyncScope::System) fences become VE "fencem" machine nodes;
// anything narrower degrades to a compiler-only MEMBARRIER.
 1100 SelectionDAG &DAG) const {
 1101 SDLoc DL(Op);
 1102 AtomicOrdering FenceOrdering =
 1103 static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
 1104 SyncScope::ID FenceSSID =
 1105 static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
 1106
 1107 // VE uses Release consistency, so need a fence instruction if it is a
 1108 // cross-thread fence.
 1109 if (FenceSSID == SyncScope::System) {
// NOTE(review): the case labels of this switch are missing from this
// listing; the comments below indicate which ordering each arm handles.
 1110 switch (FenceOrdering) {
 1114 // No need to generate fencem instruction here.
 1115 break;
 1117 // Generate "fencem 2" as acquire fence.
 1118 return SDValue(DAG.getMachineNode(VE::FENCEM, DL, MVT::Other,
 1119 DAG.getTargetConstant(2, DL, MVT::i32),
 1120 Op.getOperand(0)),
 1121 0);
 1123 // Generate "fencem 1" as release fence.
 1124 return SDValue(DAG.getMachineNode(VE::FENCEM, DL, MVT::Other,
 1125 DAG.getTargetConstant(1, DL, MVT::i32),
 1126 Op.getOperand(0)),
 1127 0);
 1130 // Generate "fencem 3" as acq_rel and seq_cst fence.
 1131 // FIXME: "fencem 3" doesn't wait for PCIe devices accesses,
 1132 // so seq_cst may require more instruction for them.
 1133 return SDValue(DAG.getMachineNode(VE::FENCEM, DL, MVT::Other,
 1134 DAG.getTargetConstant(3, DL, MVT::i32),
 1135 Op.getOperand(0)),
 1136 0);
 1137 }
 1138 }
 1139
 1140 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
 1141 return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
 1142}
1143
// shouldExpandAtomicRMWInIR - Decide how AtomicExpandPass should treat an
// atomicrmw: Xchg is handled natively (TS1AM lowering elsewhere in this
// file); everything else is expanded to a cmpxchg loop so we never fall
// back to __sync_fetch_and_* library calls.
// NOTE(review): the return statements for both paths are missing from this
// listing — verify the returned AtomicExpansionKind values upstream.
 1146 // We have TS1AM implementation for i8/i16/i32/i64, so use it.
 1147 if (AI->getOperation() == AtomicRMWInst::Xchg) {
 1149 }
 1150 // FIXME: Support "ATMAM" instruction for LOAD_ADD/SUB/AND/OR.
 1151
 1152 // Otherwise, expand it using compare and exchange instruction to not call
 1153 // __sync_fetch_and_* functions.
 1155}
1156
// prepareTS1AM - Build the byte-enable flag, the bit shift amount, and the
// shifted value needed by a TS1AM-based sub-word atomic swap. Outputs:
//   Flag - byte-enable mask for TS1AM (1 bit for i8, 2 bits for i16),
//   Bits - bit offset of the sub-word within its aligned 32-bit word,
// returns Val shifted into position (NewVal).
 1158 SDValue &Bits) {
 1159 SDLoc DL(Op);
 1160 AtomicSDNode *N = cast<AtomicSDNode>(Op);
 1161 SDValue Ptr = N->getOperand(1);
 1162 SDValue Val = N->getOperand(2);
 1163 EVT PtrVT = Ptr.getValueType();
 1164 bool Byte = N->getMemoryVT() == MVT::i8;
 1165 // Remainder = AND Ptr, 3
 1166 // Flag = 1 << Remainder ; If Byte is true (1 byte swap flag)
 1167 // Flag = 3 << Remainder ; If Byte is false (2 bytes swap flag)
 1168 // Bits = Remainder << 3
 1169 // NewVal = Val << Bits
 1170 SDValue Const3 = DAG.getConstant(3, DL, PtrVT)
 1171 SDValue Remainder = DAG.getNode(ISD::AND, DL, PtrVT, {Ptr, Const3});
 1172 SDValue Mask = Byte ? DAG.getConstant(1, DL, MVT::i32)
 1173 : DAG.getConstant(3, DL, MVT::i32);
 1174 Flag = DAG.getNode(ISD::SHL, DL, MVT::i32, {Mask, Remainder});
 1175 Bits = DAG.getNode(ISD::SHL, DL, PtrVT, {Remainder, Const3});
 1176 return DAG.getNode(ISD::SHL, DL, Val.getValueType(), {Val, Bits});
 1177}
1178
// finalizeTS1AM - Extract the swapped sub-word result from the 32-bit value
// returned by TS1AM: shift it back down by Bits and mask to 8 or 16 bits
// depending on the atomic's memory VT.
 1180 SDValue Bits) {
 1181 SDLoc DL(Op);
 1182 EVT VT = Data.getValueType();
 1183 bool Byte = cast<AtomicSDNode>(Op)->getMemoryVT() == MVT::i8;
 1184 // NewData = Data >> Bits
 1185 // Result = NewData & 0xff ; If Byte is true (1 byte)
 1186 // Result = NewData & 0xffff ; If Byte is false (2 bytes)
 1187
 1188 SDValue NewData = DAG.getNode(ISD::SRL, DL, VT, Data, Bits);
 1189 return DAG.getNode(ISD::AND, DL, VT,
 1190 {NewData, DAG.getConstant(Byte ? 0xff : 0xffff, DL, VT)});
 1191}
1192
// lowerATOMIC_SWAP - Lower ISD::ATOMIC_SWAP for i8/i16 through the VE TS1AM
// instruction (which operates on an aligned 32-bit word with a byte-enable
// mask); wider types are returned unchanged for generic legalization.
 1194 SelectionDAG &DAG) const {
 1195 SDLoc DL(Op);
 1196 AtomicSDNode *N = cast<AtomicSDNode>(Op);
 1197
 1198 if (N->getMemoryVT() == MVT::i8) {
 1199 // For i8, use "ts1am"
 1200 // Input:
 1201 // ATOMIC_SWAP Ptr, Val, Order
 1202 //
 1203 // Output:
 1204 // Remainder = AND Ptr, 3
 1205 // Flag = 1 << Remainder ; 1 byte swap flag for TS1AM inst.
 1206 // Bits = Remainder << 3
 1207 // NewVal = Val << Bits
 1208 //
 1209 // Aligned = AND Ptr, -4
 1210 // Data = TS1AM Aligned, Flag, NewVal
 1211 //
 1212 // NewData = Data >> Bits
 1213 // Result = NewData & 0xff ; 1 byte result
 1214 SDValue Flag;
 1215 SDValue Bits;
 1216 SDValue NewVal = prepareTS1AM(Op, DAG, Flag, Bits);
 1217
 1218 SDValue Ptr = N->getOperand(1);
// Round the pointer down to its enclosing 4-byte-aligned word.
 1219 SDValue Aligned = DAG.getNode(ISD::AND, DL, Ptr.getValueType(),
 1220 {Ptr, DAG.getConstant(-4, DL, MVT::i64)});
 1221 SDValue TS1AM = DAG.getAtomic(VEISD::TS1AM, DL, N->getMemoryVT(),
 1222 DAG.getVTList(Op.getNode()->getValueType(0),
 1223 Op.getNode()->getValueType(1)),
 1224 {N->getChain(), Aligned, Flag, NewVal},
 1225 N->getMemOperand());
 1226
 1227 SDValue Result = finalizeTS1AM(Op, DAG, TS1AM, Bits);
 1228 SDValue Chain = TS1AM.getValue(1);
 1229 return DAG.getMergeValues({Result, Chain}, DL);
 1230 }
// The i16 path mirrors the i8 path above; prepareTS1AM/finalizeTS1AM pick
// the 2-byte flag/mask from the atomic's memory VT.
 1231 if (N->getMemoryVT() == MVT::i16) {
 1232 // For i16, use "ts1am"
 1233 SDValue Flag;
 1234 SDValue Bits;
 1235 SDValue NewVal = prepareTS1AM(Op, DAG, Flag, Bits);
 1236
 1237 SDValue Ptr = N->getOperand(1);
 1238 SDValue Aligned = DAG.getNode(ISD::AND, DL, Ptr.getValueType(),
 1239 {Ptr, DAG.getConstant(-4, DL, MVT::i64)});
 1240 SDValue TS1AM = DAG.getAtomic(VEISD::TS1AM, DL, N->getMemoryVT(),
 1241 DAG.getVTList(Op.getNode()->getValueType(0),
 1242 Op.getNode()->getValueType(1)),
 1243 {N->getChain(), Aligned, Flag, NewVal},
 1244 N->getMemOperand());
 1245
 1246 SDValue Result = finalizeTS1AM(Op, DAG, TS1AM, Bits);
 1247 SDValue Chain = TS1AM.getValue(1);
 1248 return DAG.getMergeValues({Result, Chain}, DL);
 1249 }
 1250 // Otherwise, let llvm legalize it.
 1251 return Op;
 1252}
1253
// lowerGlobalAddress - Global addresses share the generic makeAddress
// lowering (PIC/GOT or absolute, chosen there).
 1255 SelectionDAG &DAG) const {
 1256 return makeAddress(Op, DAG);
 1257}
1258
// lowerBlockAddress - Block addresses also go through makeAddress.
 1260 SelectionDAG &DAG) const {
 1261 return makeAddress(Op, DAG);
 1262}
1263
// lowerConstantPool - Constant-pool addresses also go through makeAddress.
 1265 SelectionDAG &DAG) const {
 1266 return makeAddress(Op, DAG);
 1267}
1268
// lowerToTLSGeneralDynamicModel - Lower a TLS address using the general
// dynamic model: a GETTLSADDR pseudo (codegen'ed as a call) bracketed by
// callseq markers, with the result read back from %sx0.
 1269SDValue
 1271 SelectionDAG &DAG) const {
 1272 SDLoc DL(Op);
 1273
 1274 // Generate the following code:
 1275 // t1: ch,glue = callseq_start t0, 0, 0
 1276 // t2: i64,ch,glue = VEISD::GETTLSADDR t1, label, t1:1
 1277 // t3: ch,glue = callseq_end t2, 0, 0, t2:2
 1278 // t4: i64,ch,glue = CopyFromReg t3, Register:i64 $sx0, t3:1
 1279 SDValue Label = withTargetFlags(Op, 0, DAG);
 1280 EVT PtrVT = Op.getValueType();
 1281
 1282 // Lowering the machine isd will make sure everything is in the right
 1283 // location.
 1284 SDValue Chain = DAG.getEntryNode();
 1285 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
// NOTE(review): the argument list of getCallPreservedMask is truncated in
// this listing — confirm the calling convention used upstream.
 1286 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
 1288 Chain = DAG.getCALLSEQ_START(Chain, 64, 0, DL);
 1289 SDValue Args[] = {Chain, Label, DAG.getRegisterMask(Mask), Chain.getValue(1)};
 1290 Chain = DAG.getNode(VEISD::GETTLSADDR, DL, NodeTys, Args);
 1291 Chain = DAG.getCALLSEQ_END(Chain, 64, 0, Chain.getValue(1), DL);
 1292 Chain = DAG.getCopyFromReg(Chain, DL, VE::SX0, PtrVT, Chain.getValue(1));
 1293
 1294 // GETTLSADDR will be codegen'ed as call. Inform MFI that function has calls.
 1296 MFI.setHasCalls(true);
 1297
 1298 // Also generate code to prepare a GOT register if it is PIC.
 1299 if (isPositionIndependent()) {
 1301 Subtarget->getInstrInfo()->getGlobalBaseReg(&MF);
 1302 }
 1303
 1304 return Chain;
 1305}
1306
// lowerGlobalTLSAddress - All TLS models funnel into the general dynamic
// sequence; the linker limitation below explains why.
 1308 SelectionDAG &DAG) const {
 1309 // The current implementation of nld (2.26) doesn't allow local exec model
 1310 // code described in VE-tls_v1.1.pdf (*1) as its input. Instead, we always
 1311 // generate the general dynamic model code sequence.
 1312 //
 1313 // *1: https://www.nec.com/en/global/prod/hpc/aurora/document/VE-tls_v1.1.pdf
 1314 return lowerToTLSGeneralDynamicModel(Op, DAG);
 1315}
1316
// lowerJumpTable - Jump-table addresses also go through makeAddress.
 1318 return makeAddress(Op, DAG);
 1319}
1320
 1321// Lower a f128 load into two f64 loads.
// The two halves are loaded at offsets 0 (Lo64) and 8 (Hi64) and then
// recombined into an f128 register pair via INSERT_SUBREG (even = hi,
// odd = lo); the chains of both loads are merged with a TokenFactor.
 1323 SDLoc DL(Op);
 1324 LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
 1325 assert(LdNode && LdNode->getOffset().isUndef() && "Unexpected node type");
// Each half is an 8-byte access, so cap the claimed alignment at 8.
 1326 Align Alignment = LdNode->getAlign();
 1327 if (Alignment > 8)
 1328 Alignment = Align(8);
 1329
 1330 SDValue Lo64 =
 1331 DAG.getLoad(MVT::f64, DL, LdNode->getChain(), LdNode->getBasePtr(),
 1332 LdNode->getPointerInfo(), Alignment,
 1335 EVT AddrVT = LdNode->getBasePtr().getValueType();
 1336 SDValue HiPtr = DAG.getNode(ISD::ADD, DL, AddrVT, LdNode->getBasePtr(),
 1337 DAG.getConstant(8, DL, AddrVT));
 1338 SDValue Hi64 =
 1339 DAG.getLoad(MVT::f64, DL, LdNode->getChain(), HiPtr,
 1340 LdNode->getPointerInfo(), Alignment,
 1343
 1344 SDValue SubRegEven = DAG.getTargetConstant(VE::sub_even, DL, MVT::i32);
 1345 SDValue SubRegOdd = DAG.getTargetConstant(VE::sub_odd, DL, MVT::i32);
 1346
 1347 // VE stores Hi64 to 8(addr) and Lo64 to 0(addr)
 1348 SDNode *InFP128 =
 1349 DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f128);
 1350 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f128,
 1351 SDValue(InFP128, 0), Hi64, SubRegEven);
 1352 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f128,
 1353 SDValue(InFP128, 0), Lo64, SubRegOdd);
 1354 SDValue OutChains[2] = {SDValue(Lo64.getNode(), 1),
 1355 SDValue(Hi64.getNode(), 1)};
 1356 SDValue OutChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
 1357 SDValue Ops[2] = {SDValue(InFP128, 0), OutChain};
 1358 return DAG.getMergeValues(Ops, DL);
 1359}
1360
 1361// Lower a vXi1 load into following instructions
 1362// LDrii %1, (,%addr)
 1363// LVMxir %vm, 0, %1
 1364// LDrii %2, 8(,%addr)
 1365// LVMxir %vm, 0, %2
 1366// ...
// A v256i1 mask occupies four i64 words, a v512i1 mask eight; each word is
// loaded individually and inserted into the mask register with LVM.
 1368 SDLoc DL(Op);
 1369 LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
 1370 assert(LdNode && LdNode->getOffset().isUndef() && "Unexpected node type");
 1371
 1372 SDValue BasePtr = LdNode->getBasePtr();
// Each element access is 8 bytes, so cap the claimed alignment at 8.
 1373 Align Alignment = LdNode->getAlign();
 1374 if (Alignment > 8)
 1375 Alignment = Align(8);
 1376
 1377 EVT AddrVT = BasePtr.getValueType();
 1378 EVT MemVT = LdNode->getMemoryVT();
 1379 if (MemVT == MVT::v256i1 || MemVT == MVT::v4i64) {
 1380 SDValue OutChains[4];
 1381 SDNode *VM = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MemVT);
 1382 for (int i = 0; i < 4; ++i) {
 1383 // Generate load dag and prepare chains.
 1384 SDValue Addr = DAG.getNode(ISD::ADD, DL, AddrVT, BasePtr,
 1385 DAG.getConstant(8 * i, DL, AddrVT));
 1386 SDValue Val =
 1387 DAG.getLoad(MVT::i64, DL, LdNode->getChain(), Addr,
 1388 LdNode->getPointerInfo(), Alignment,
 1391 OutChains[i] = SDValue(Val.getNode(), 1);
 1392
 1393 VM = DAG.getMachineNode(VE::LVMir_m, DL, MVT::i64,
 1394 DAG.getTargetConstant(i, DL, MVT::i64), Val,
 1395 SDValue(VM, 0));
 1396 }
 1397 SDValue OutChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
 1398 SDValue Ops[2] = {SDValue(VM, 0), OutChain};
 1399 return DAG.getMergeValues(Ops, DL);
 1400 } else if (MemVT == MVT::v512i1 || MemVT == MVT::v8i64) {
 1401 SDValue OutChains[8];
 1402 SDNode *VM = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MemVT);
 1403 for (int i = 0; i < 8; ++i) {
 1404 // Generate load dag and prepare chains.
 1405 SDValue Addr = DAG.getNode(ISD::ADD, DL, AddrVT, BasePtr,
 1406 DAG.getConstant(8 * i, DL, AddrVT));
 1407 SDValue Val =
 1408 DAG.getLoad(MVT::i64, DL, LdNode->getChain(), Addr,
 1409 LdNode->getPointerInfo(), Alignment,
 1412 OutChains[i] = SDValue(Val.getNode(), 1);
 1413
 1414 VM = DAG.getMachineNode(VE::LVMyir_y, DL, MVT::i64,
 1415 DAG.getTargetConstant(i, DL, MVT::i64), Val,
 1416 SDValue(VM, 0));
 1417 }
 1418 SDValue OutChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
 1419 SDValue Ops[2] = {SDValue(VM, 0), OutChain};
 1420 return DAG.getMergeValues(Ops, DL);
 1421 } else {
 1422 // Otherwise, ask llvm to expand it.
 1423 return SDValue();
 1424 }
 1425}
1426
// lowerLOAD - Dispatch custom load lowering: VVP for non-mask vectors when
// the VPU is enabled, split lowering for f128, LVM-based lowering for mask
// types; everything else is left as-is.
 1428 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
 1429 EVT MemVT = LdNode->getMemoryVT();
 1430
 1431 // If VPU is enabled, always expand non-mask vector loads to VVP
 1432 if (Subtarget->enableVPU() && MemVT.isVector() && !isMaskType(MemVT))
 1433 return lowerToVVP(Op, DAG);
 1434
 1435 SDValue BasePtr = LdNode->getBasePtr();
 1436 if (isa<FrameIndexSDNode>(BasePtr.getNode())) {
 1437 // Do not expand load instruction with frame index here because of
 1438 // dependency problems. We expand it later in eliminateFrameIndex().
 1439 return Op;
 1440 }
 1441
 1442 if (MemVT == MVT::f128)
 1443 return lowerLoadF128(Op, DAG);
 1444 if (isMaskType(MemVT))
 1445 return lowerLoadI1(Op, DAG);
 1446
 1447 return Op;
 1448}
1449
 1450// Lower a f128 store into two f64 stores.
// The f128 register pair is split via EXTRACT_SUBREG (even = hi, odd = lo)
// and the halves stored at offsets 0 and 8; chains merge via TokenFactor.
 1452 SDLoc DL(Op);
 1453 StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
 1454 assert(StNode && StNode->getOffset().isUndef() && "Unexpected node type");
 1455
 1456 SDValue SubRegEven = DAG.getTargetConstant(VE::sub_even, DL, MVT::i32);
 1457 SDValue SubRegOdd = DAG.getTargetConstant(VE::sub_odd, DL, MVT::i32);
 1458
 1459 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i64,
 1460 StNode->getValue(), SubRegEven);
 1461 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i64,
 1462 StNode->getValue(), SubRegOdd);
 1463
// Each half is an 8-byte access, so cap the claimed alignment at 8.
 1464 Align Alignment = StNode->getAlign();
 1465 if (Alignment > 8)
 1466 Alignment = Align(8);
 1467
 1468 // VE stores Hi64 to 8(addr) and Lo64 to 0(addr)
 1469 SDValue OutChains[2];
 1470 OutChains[0] =
 1471 DAG.getStore(StNode->getChain(), DL, SDValue(Lo64, 0),
 1472 StNode->getBasePtr(), MachinePointerInfo(), Alignment,
 1475 EVT AddrVT = StNode->getBasePtr().getValueType();
 1476 SDValue HiPtr = DAG.getNode(ISD::ADD, DL, AddrVT, StNode->getBasePtr(),
 1477 DAG.getConstant(8, DL, AddrVT));
 1478 OutChains[1] =
 1479 DAG.getStore(StNode->getChain(), DL, SDValue(Hi64, 0), HiPtr,
 1480 MachinePointerInfo(), Alignment,
 1483 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
 1484}
1485
 1486// Lower a vXi1 store into following instructions
 1487// SVMi %1, %vm, 0
 1488// STrii %1, (,%addr)
 1489// SVMi %2, %vm, 1
 1490// STrii %2, 8(,%addr)
 1491// ...
// Mirror of lowerLoadI1: each i64 word of the mask register is extracted
// with SVM and stored at consecutive 8-byte offsets.
 1493 SDLoc DL(Op);
 1494 StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
 1495 assert(StNode && StNode->getOffset().isUndef() && "Unexpected node type");
 1496
 1497 SDValue BasePtr = StNode->getBasePtr();
// Each element access is 8 bytes, so cap the claimed alignment at 8.
 1498 Align Alignment = StNode->getAlign();
 1499 if (Alignment > 8)
 1500 Alignment = Align(8);
 1501 EVT AddrVT = BasePtr.getValueType();
 1502 EVT MemVT = StNode->getMemoryVT();
 1503 if (MemVT == MVT::v256i1 || MemVT == MVT::v4i64) {
 1504 SDValue OutChains[4];
 1505 for (int i = 0; i < 4; ++i) {
 1506 SDNode *V =
 1507 DAG.getMachineNode(VE::SVMmi, DL, MVT::i64, StNode->getValue(),
 1508 DAG.getTargetConstant(i, DL, MVT::i64));
 1509 SDValue Addr = DAG.getNode(ISD::ADD, DL, AddrVT, BasePtr,
 1510 DAG.getConstant(8 * i, DL, AddrVT));
 1511 OutChains[i] =
 1512 DAG.getStore(StNode->getChain(), DL, SDValue(V, 0), Addr,
 1513 MachinePointerInfo(), Alignment,
 1516 }
 1517 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
 1518 } else if (MemVT == MVT::v512i1 || MemVT == MVT::v8i64) {
 1519 SDValue OutChains[8];
 1520 for (int i = 0; i < 8; ++i) {
 1521 SDNode *V =
 1522 DAG.getMachineNode(VE::SVMyi, DL, MVT::i64, StNode->getValue(),
 1523 DAG.getTargetConstant(i, DL, MVT::i64));
 1524 SDValue Addr = DAG.getNode(ISD::ADD, DL, AddrVT, BasePtr,
 1525 DAG.getConstant(8 * i, DL, AddrVT));
 1526 OutChains[i] =
 1527 DAG.getStore(StNode->getChain(), DL, SDValue(V, 0), Addr,
 1528 MachinePointerInfo(), Alignment,
 1531 }
 1532 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
 1533 } else {
 1534 // Otherwise, ask llvm to expand it.
 1535 return SDValue();
 1536 }
 1537}
1538
// lowerSTORE - Dispatch custom store lowering, mirroring lowerLOAD: VVP for
// non-mask vectors when the VPU is enabled, split lowering for f128, SVM
// lowering for mask types; otherwise defer to generic expansion.
 1540 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
 1541 assert(StNode && StNode->getOffset().isUndef() && "Unexpected node type");
 1542 EVT MemVT = StNode->getMemoryVT();
 1543
 1544 // If VPU is enabled, always expand non-mask vector stores to VVP
 1545 if (Subtarget->enableVPU() && MemVT.isVector() && !isMaskType(MemVT))
 1546 return lowerToVVP(Op, DAG);
 1547
 1548 SDValue BasePtr = StNode->getBasePtr();
 1549 if (isa<FrameIndexSDNode>(BasePtr.getNode())) {
 1550 // Do not expand store instruction with frame index here because of
 1551 // dependency problems. We expand it later in eliminateFrameIndex().
 1552 return Op;
 1553 }
 1554
 1555 if (MemVT == MVT::f128)
 1556 return lowerStoreF128(Op, DAG);
 1557 if (isMaskType(MemVT))
 1558 return lowerStoreI1(Op, DAG);
 1559
 1560 // Otherwise, ask llvm to expand it.
 1561 return SDValue();
 1562}
1563
// lowerVASTART - Store the address of the incoming varargs area (frame
// register %sx9 plus the function's varargs frame offset) into the
// va_list memory operand.
 1567 auto PtrVT = getPointerTy(DAG.getDataLayout());
 1568
 1569 // Need frame address to find the address of VarArgsFrameIndex.
// NOTE(review): FuncInfo's declaration is missing from this listing —
// presumably the VE machine-function info; verify upstream.
 1571
 1572 // vastart just stores the address of the VarArgsFrameIndex slot into the
 1573 // memory location argument.
 1574 SDLoc DL(Op);
 1575 SDValue Offset =
 1576 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(VE::SX9, PtrVT),
 1577 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
 1578 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
 1579 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
 1580 MachinePointerInfo(SV));
 1581}
1582
// lowerVAARG - Lower ISD::VAARG: load the current va_list pointer, compute
// the next pointer (16 bytes for f128 with dynamic 16-byte realignment,
// 8 bytes otherwise, with f32 read from the upper half of its slot),
// store the incremented pointer back, then load the argument itself.
 1584 SDNode *Node = Op.getNode();
 1585 EVT VT = Node->getValueType(0);
 1586 SDValue InChain = Node->getOperand(0);
 1587 SDValue VAListPtr = Node->getOperand(1);
 1588 EVT PtrVT = VAListPtr.getValueType();
 1589 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
 1590 SDLoc DL(Node);
 1591 SDValue VAList =
 1592 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
 1593 SDValue Chain = VAList.getValue(1);
 1594 SDValue NextPtr;
 1595
 1596 if (VT == MVT::f128) {
 1597 // VE f128 values must be stored with 16 bytes alignment. We don't
 1598 // know the actual alignment of VAList, so we take alignment of it
 1599 // dynamically.
 1600 int Align = 16;
// Round VAList up to the next 16-byte boundary: (p + 15) & -16.
 1601 VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
 1602 DAG.getConstant(Align - 1, DL, PtrVT));
 1603 VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList,
 1604 DAG.getConstant(-Align, DL, PtrVT));
 1605 // Increment the pointer, VAList, by 16 to the next vaarg.
 1606 NextPtr =
 1607 DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getIntPtrConstant(16, DL));
 1608 } else if (VT == MVT::f32) {
 1609 // float --> need special handling like below.
 1610 // 0 4
 1611 // +------+------+
 1612 // | empty| float|
 1613 // +------+------+
 1614 // Increment the pointer, VAList, by 8 to the next vaarg.
 1615 NextPtr =
 1616 DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getIntPtrConstant(8, DL));
 1617 // Then, adjust VAList.
 1618 unsigned InternalOffset = 4;
 1619 VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
 1620 DAG.getConstant(InternalOffset, DL, PtrVT));
 1621 } else {
 1622 // Increment the pointer, VAList, by 8 to the next vaarg.
 1623 NextPtr =
 1624 DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getIntPtrConstant(8, DL));
 1625 }
 1626
 1627 // Store the incremented VAList to the legalized pointer.
 1628 InChain = DAG.getStore(Chain, DL, NextPtr, VAListPtr, MachinePointerInfo(SV));
 1629
 1630 // Load the actual argument out of the pointer VAList.
 1631 // We can't count on greater alignment than the word size.
 1632 return DAG.getLoad(
 1633 VT, DL, InChain, VAList, MachinePointerInfo(),
 1634 Align(std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8));
 1635}
1636
// lowerDYNAMIC_STACKALLOC - Grow the stack by calling the runtime helper
// (__ve_grow_stack or, when an over-aligned allocation is requested,
// __ve_grow_stack_align) and return the new stack top via the GETSTACKTOP
// pseudo, realigned in the DAG if necessary.
 1638 SelectionDAG &DAG) const {
 1639 // Generate following code.
 1640 // (void)__llvm_grow_stack(size);
 1641 // ret = GETSTACKTOP; // pseudo instruction
 1642 SDLoc DL(Op);
 1643
 1644 // Get the inputs.
 1645 SDNode *Node = Op.getNode();
 1646 SDValue Chain = Op.getOperand(0);
 1647 SDValue Size = Op.getOperand(1);
 1648 MaybeAlign Alignment(Op.getConstantOperandVal(2));
 1649 EVT VT = Node->getValueType(0);
 1650
 1651 // Chain the dynamic stack allocation so that it doesn't modify the stack
 1652 // pointer when other instructions are using the stack.
 1653 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
 1654
 1655 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
 1656 Align StackAlign = TFI.getStackAlign();
// Only realign when the request exceeds the default stack alignment.
 1657 bool NeedsAlign = Alignment.valueOrOne() > StackAlign;
 1658
 1659 // Prepare arguments
// NOTE(review): the declarations of Args/Entry (and later RetTy/CLI) are
// missing from this listing — verify against upstream.
 1662 Entry.Node = Size;
 1663 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
 1664 Args.push_back(Entry);
 1665 if (NeedsAlign) {
// Pass the alignment as an inverted mask, as the helper expects.
 1666 Entry.Node = DAG.getConstant(~(Alignment->value() - 1ULL), DL, VT);
 1667 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
 1668 Args.push_back(Entry);
 1669 }
 1671
 1672 EVT PtrVT = Op.getValueType();
 1673 SDValue Callee;
 1674 if (NeedsAlign) {
 1675 Callee = DAG.getTargetExternalSymbol("__ve_grow_stack_align", PtrVT, 0);
 1676 } else {
 1677 Callee = DAG.getTargetExternalSymbol("__ve_grow_stack", PtrVT, 0);
 1678 }
 1679
 1681 CLI.setDebugLoc(DL)
 1682 .setChain(Chain)
 1683 .setCallee(CallingConv::PreserveAll, RetTy, Callee, std::move(Args))
 1684 .setDiscardResult(true);
 1685 std::pair<SDValue, SDValue> pair = LowerCallTo(CLI);
 1686 Chain = pair.second;
 1687 SDValue Result = DAG.getNode(VEISD::GETSTACKTOP, DL, VT, Chain);
 1688 if (NeedsAlign) {
// Round the returned stack top up to the requested alignment.
 1689 Result = DAG.getNode(ISD::ADD, DL, VT, Result,
 1690 DAG.getConstant((Alignment->value() - 1ULL), DL, VT));
 1691 Result = DAG.getNode(ISD::AND, DL, VT, Result,
 1692 DAG.getConstant(~(Alignment->value() - 1ULL), DL, VT));
 1693 }
 1694 // Chain = Result.getValue(1);
 1695 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), DL);
 1696
 1697 SDValue Ops[2] = {Result, Chain};
 1698 return DAG.getMergeValues(Ops, DL);
 1699}
1700
// lowerEH_SJLJ_LONGJMP - Forward to the EH_SJLJ_LONGJMP target node
// (operand 1 is the jump buffer pointer).
 1703 SDLoc DL(Op);
 1704 return DAG.getNode(VEISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0),
 1705 Op.getOperand(1));
 1706}
1707
// lowerEH_SJLJ_SETJMP - Forward to the EH_SJLJ_SETJMP target node,
// producing the i32 setjmp result plus a chain.
 1710 SDLoc DL(Op);
 1711 return DAG.getNode(VEISD::EH_SJLJ_SETJMP, DL,
 1712 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
 1713 Op.getOperand(1));
 1714}
1715
// lowerEH_SJLJ_SETUP_DISPATCH - Forward to the EH_SJLJ_SETUP_DISPATCH
// target node (chain-only).
 1718 SDLoc DL(Op);
 1719 return DAG.getNode(VEISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
 1720 Op.getOperand(0));
 1721}
1722
// lowerFRAMEADDR - Return the frame address for ISD::FRAMEADDR. Operand 0
// is the requested depth; each level walks one saved-frame link by loading
// through the previous frame address.
 1724 const VETargetLowering &TLI,
 1725 const VESubtarget *Subtarget) {
 1726 SDLoc DL(Op);
 1728 EVT PtrVT = TLI.getPointerTy(MF.getDataLayout());
 1729
// Record that this function materializes its frame address.
 1730 MachineFrameInfo &MFI = MF.getFrameInfo();
 1731 MFI.setFrameAddressIsTaken(true);
 1732
 1733 unsigned Depth = Op.getConstantOperandVal(0);
 1734 const VERegisterInfo *RegInfo = Subtarget->getRegisterInfo();
 1735 Register FrameReg = RegInfo->getFrameRegister(MF);
 1736 SDValue FrameAddr =
 1737 DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, PtrVT);
 1738 while (Depth--)
 1739 FrameAddr = DAG.getLoad(Op.getValueType(), DL, DAG.getEntryNode(),
 1740 FrameAddr, MachinePointerInfo());
 1741 return FrameAddr;
 1742}
1743
// lowerRETURNADDR - Return the return address for ISD::RETURNADDR: the
// frame address (via lowerFRAMEADDR) plus 8, loaded from memory.
// NOTE(review): the early-return condition at the top is missing from this
// listing — verify it upstream.
 1745 const VETargetLowering &TLI,
 1746 const VESubtarget *Subtarget) {
 1748 MachineFrameInfo &MFI = MF.getFrameInfo();
 1749 MFI.setReturnAddressIsTaken(true);
 1750
 1752 return SDValue();
 1753
 1754 SDValue FrameAddr = lowerFRAMEADDR(Op, DAG, TLI, Subtarget);
 1755
 1756 SDLoc DL(Op);
 1757 EVT VT = Op.getValueType();
// The return address is stored 8 bytes above the frame address.
 1758 SDValue Offset = DAG.getConstant(8, DL, VT);
 1759 return DAG.getLoad(VT, DL, DAG.getEntryNode(),
 1760 DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
 1762}
1763
// lowerINTRINSIC_WO_CHAIN - Custom-lower chainless intrinsics. Currently
// only eh_sjlj_lsda is handled: it materializes the address of this
// function's GCC_except_table symbol (GOT-relative under PIC).
 1765 SelectionDAG &DAG) const {
 1766 SDLoc DL(Op);
 1767 unsigned IntNo = Op.getConstantOperandVal(0);
 1768 switch (IntNo) {
 1769 default: // Don't custom lower most intrinsics.
 1770 return SDValue();
 1771 case Intrinsic::eh_sjlj_lsda: {
 1773 MVT VT = Op.getSimpleValueType();
 1774 const VETargetMachine *TM =
 1775 static_cast<const VETargetMachine *>(&DAG.getTarget());
 1776
 1777 // Create GCC_except_tableXX string. The real symbol for that will be
 1778 // generated in EHStreamer::emitExceptionTable() later. So, we just
 1779 // borrow its name here.
 1780 TM->getStrList()->push_back(std::string(
 1781 (Twine("GCC_except_table") + Twine(MF.getFunctionNumber())).str()));
 1782 SDValue Addr =
 1783 DAG.getTargetExternalSymbol(TM->getStrList()->back().c_str(), VT, 0);
 1784 if (isPositionIndependent()) {
// Under PIC, the symbol is addressed GOT-relative off GLOBAL_BASE_REG.
 1787 SDValue GlobalBase = DAG.getNode(VEISD::GLOBAL_BASE_REG, DL, VT);
 1788 return DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Addr);
 1789 }
// NOTE(review): the non-PIC return is missing from this listing.
 1791 }
 1792 }
 1793}
1794
1795static bool getUniqueInsertion(SDNode *N, unsigned &UniqueIdx) {
1796 if (!isa<BuildVectorSDNode>(N))
1797 return false;
1798 const auto *BVN = cast<BuildVectorSDNode>(N);
1799
1800 // Find first non-undef insertion.
1801 unsigned Idx;
1802 for (Idx = 0; Idx < BVN->getNumOperands(); ++Idx) {
1803 auto ElemV = BVN->getOperand(Idx);
1804 if (!ElemV->isUndef())
1805 break;
1806 }
1807 // Catch the (hypothetical) all-undef case.
1808 if (Idx == BVN->getNumOperands())
1809 return false;
1810 // Remember insertion.
1811 UniqueIdx = Idx++;
1812 // Verify that all other insertions are undef.
1813 for (; Idx < BVN->getNumOperands(); ++Idx) {
1814 auto ElemV = BVN->getOperand(Idx);
1815 if (!ElemV->isUndef())
1816 return false;
1817 }
1818 return true;
1819}
1820
// getSplatValue - Return the splat value of a BUILD_VECTOR node, or an
// empty SDValue when the node is not a BUILD_VECTOR or is not a splat.
 1822 if (auto *BuildVec = dyn_cast<BuildVectorSDNode>(N)) {
 1823 return BuildVec->getSplatValue();
 1824 }
 1825 return SDValue();
 1826}
1827
// lowerBUILD_VECTOR - Lower ISD::BUILD_VECTOR: a single defined element
// becomes INSERT_VECTOR_ELT into an undef vector; a splat becomes a VE
// broadcast with AVL = element count; anything else is expanded generically.
 1829 SelectionDAG &DAG) const {
 1830 VECustomDAG CDAG(DAG, Op);
 1831 MVT ResultVT = Op.getSimpleValueType();
 1832
 1833 // If there is just one element, expand to INSERT_VECTOR_ELT.
 1834 unsigned UniqueIdx;
 1835 if (getUniqueInsertion(Op.getNode(), UniqueIdx)) {
 1836 SDValue AccuV = CDAG.getUNDEF(Op.getValueType());
 1837 auto ElemV = Op->getOperand(UniqueIdx);
 1838 SDValue IdxV = CDAG.getConstant(UniqueIdx, MVT::i64);
 1839 return CDAG.getNode(ISD::INSERT_VECTOR_ELT, ResultVT, {AccuV, ElemV, IdxV});
 1840 }
 1841
 1842 // Else emit a broadcast.
 1843 if (SDValue ScalarV = getSplatValue(Op.getNode())) {
 1844 unsigned NumEls = ResultVT.getVectorNumElements();
 1845 auto AVL = CDAG.getConstant(NumEls, MVT::i32);
 1846 return CDAG.getBroadcast(ResultVT, ScalarV, AVL);
 1847 }
 1848
 1849 // Expand
 1850 return SDValue();
 1851}
1852
// getCustomOperationAction - Report which internal VVP_*/VEC_* opcodes need
// Custom action so their AVL operand can be pack-legalized.
 1855 // Custom legalization on VVP_* and VEC_* opcodes is required to pack-legalize
 1856 // these operations (transform nodes such that their AVL parameter refers to
 1857 // packs of 64bit, instead of number of elements).
 1858
 1859 // Packing opcodes are created with a pack-legal AVL (LEGALAVL). No need to
 1860 // re-visit them.
 1861 if (isPackingSupportOpcode(Op.getOpcode()))
 1862 return Legal;
 1863
 1864 // Custom lower to legalize AVL for packed mode.
 1865 if (isVVPOrVEC(Op.getOpcode()))
 1866 return Custom;
 1867 return Legal;
 1868}
1869
// LowerOperation - Top-level custom-lowering dispatcher: first the scalar
// opcodes, then VP intrinsics and VVP/VEC vector opcodes.
// NOTE(review): several case labels in the first switch are missing from
// this listing (visible only as their return statements); verify upstream.
 1871 LLVM_DEBUG(dbgs() << "::LowerOperation "; Op.dump(&DAG));
 1872 unsigned Opcode = Op.getOpcode();
 1873
 1874 /// Scalar isel.
 1875 switch (Opcode) {
 1876 case ISD::ATOMIC_FENCE:
 1877 return lowerATOMIC_FENCE(Op, DAG);
 1878 case ISD::ATOMIC_SWAP:
 1879 return lowerATOMIC_SWAP(Op, DAG);
 1880 case ISD::BlockAddress:
 1881 return lowerBlockAddress(Op, DAG);
 1882 case ISD::ConstantPool:
 1883 return lowerConstantPool(Op, DAG);
 1885 return lowerDYNAMIC_STACKALLOC(Op, DAG);
 1887 return lowerEH_SJLJ_LONGJMP(Op, DAG);
 1889 return lowerEH_SJLJ_SETJMP(Op, DAG);
 1891 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
 1892 case ISD::FRAMEADDR:
 1893 return lowerFRAMEADDR(Op, DAG, *this, Subtarget);
 1894 case ISD::GlobalAddress:
 1895 return lowerGlobalAddress(Op, DAG);
 1897 return lowerGlobalTLSAddress(Op, DAG);
 1899 return lowerINTRINSIC_WO_CHAIN(Op, DAG);
 1900 case ISD::JumpTable:
 1901 return lowerJumpTable(Op, DAG);
 1902 case ISD::LOAD:
 1903 return lowerLOAD(Op, DAG);
 1904 case ISD::RETURNADDR:
 1905 return lowerRETURNADDR(Op, DAG, *this, Subtarget);
 1906 case ISD::BUILD_VECTOR:
 1907 return lowerBUILD_VECTOR(Op, DAG);
 1908 case ISD::STORE:
 1909 return lowerSTORE(Op, DAG);
 1910 case ISD::VASTART:
 1911 return lowerVASTART(Op, DAG);
 1912 case ISD::VAARG:
 1913 return lowerVAARG(Op, DAG);
 1914
 1916 return lowerINSERT_VECTOR_ELT(Op, DAG);
 1918 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
 1919 }
 1920
 1921 /// Vector isel.
// All VP intrinsics funnel straight into the VVP layer.
 1922 if (ISD::isVPOpcode(Opcode))
 1923 return lowerToVVP(Op, DAG);
 1924
 1925 switch (Opcode) {
 1926 default:
 1927 llvm_unreachable("Should not custom lower this!");
 1928
 1929 // Legalize the AVL of this internal node.
 1931#define ADD_VVP_OP(VVP_NAME, ...) case VEISD::VVP_NAME:
 1932#include "VVPNodes.def"
 1933 // AVL already legalized.
 1934 if (getAnnotatedNodeAVL(Op).second)
 1935 return Op;
 1936 return legalizeInternalVectorOp(Op, DAG);
 1937
 1938 // Translate into a VEC_*/VVP_* layer operation.
 1939 case ISD::MLOAD:
 1940 case ISD::MSTORE:
 1941#define ADD_VVP_OP(VVP_NAME, ISD_NAME) case ISD::ISD_NAME:
 1942#include "VVPNodes.def"
 1943 if (isMaskArithmetic(Op) && isPackedVectorType(Op.getValueType()))
 1944 return splitMaskArithmetic(Op, DAG);
 1945 return lowerToVVP(Op, DAG);
 1946 }
 1947}
1948/// } Custom Lower
1949
// ReplaceNodeResults - Custom type legalization hook. ATOMIC_SWAP is
// intentionally left for LowerOperation; any other opcode reaching here
// is a bug.
 1952 SelectionDAG &DAG) const {
 1953 switch (N->getOpcode()) {
 1954 case ISD::ATOMIC_SWAP:
 1955 // Let LLVM expand atomic swap instruction through LowerOperation.
 1956 return;
 1957 default:
 1958 LLVM_DEBUG(N->dumpr(&DAG));
 1959 llvm_unreachable("Do not know how to custom type legalize this operation!");
 1960 }
 1961}
1962
1963/// JumpTable for VE.
1964///
1965/// VE cannot generate relocatable symbol in jump table. VE cannot
1966/// generate expressions using symbols in both text segment and data
1967/// segment like below.
1968/// .4byte .LBB0_2-.LJTI0_0
1969/// So, we generate offset from the top of function like below as
1970/// a custom label.
1971/// .4byte .LBB0_2-<function name>
1972
1974 // Use custom label for PIC.
1977
1978 // Otherwise, use the normal jump table encoding heuristics.
1980}
1981
// LowerCustomJumpTableEntry - Emit a PIC jump-table entry as the difference
// between the target block's symbol and this function's symbol
// (.4byte .LBB0_2-<function name>), since VE cannot relocate text-segment
// symbols in data directly.
// NOTE(review): the declaration of Sym is missing from this listing —
// presumably this function's MCSymbol; verify upstream.
 1983 const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
 1984 unsigned Uid, MCContext &Ctx) const {
 1986
 1987 // Generate custom label for PIC like below.
 1988 // .4bytes .LBB0_2-<function name>
 1989 const auto *Value = MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
 1991 const auto *Base = MCSymbolRefExpr::create(Sym, Ctx);
 1992 return MCBinaryExpr::createSub(Value, Base, Ctx);
 1993}
1994
// lowerJumpTableRelocAddr - Compute the function base address that PIC
// jump-table entries (function-relative offsets) must be added to, using
// a GOT-relative hi/lo pair plus GLOBAL_BASE_REG.
 1996 SelectionDAG &DAG) const {
 1998 SDLoc DL(Table);
 2000 assert(Function != nullptr);
 2001 auto PtrTy = getPointerTy(DAG.getDataLayout(), Function->getAddressSpace());
 2002
 2003 // In the jump table, we have following values in PIC mode.
 2004 // .4bytes .LBB0_2-<function name>
 2005 // We need to add this value and the address of this function to generate
 2006 // .LBB0_2 label correctly under PIC mode. So, we want to generate following
 2007 // instructions:
 2008 // lea %reg, fun@gotoff_lo
 2009 // and %reg, %reg, (32)0
 2010 // lea.sl %reg, fun@gotoff_hi(%reg, %got)
 2011 // In order to do so, we need to generate correctly marked DAG node using
 2012 // makeHiLoPair.
 2013 SDValue Op = DAG.getGlobalAddress(Function, DL, PtrTy);
// NOTE(review): the makeHiLoPair call defining HiLo is missing from this
// listing — verify upstream.
 2016 SDValue GlobalBase = DAG.getNode(VEISD::GLOBAL_BASE_REG, DL, PtrTy);
 2017 return DAG.getNode(ISD::ADD, DL, PtrTy, GlobalBase, HiLo);
 2018}
2019
// prepareMBB - Materialize the address of a basic block into a fresh
// virtual register using the lea / and / lea.sl sequence, GOT-relative
// under PIC and absolute otherwise; returns the result register.
 2022 MachineBasicBlock *TargetBB,
 2023 const DebugLoc &DL) const {
 2026 const VEInstrInfo *TII = Subtarget->getInstrInfo();
 2027
 2028 const TargetRegisterClass *RC = &VE::I64RegClass;
 2029 Register Tmp1 = MRI.createVirtualRegister(RC);
 2030 Register Tmp2 = MRI.createVirtualRegister(RC);
 2031 Register Result = MRI.createVirtualRegister(RC);
 2032
 2033 if (isPositionIndependent()) {
 2034 // Create following instructions for local linkage PIC code.
 2035 // lea %Tmp1, TargetBB@gotoff_lo
 2036 // and %Tmp2, %Tmp1, (32)0
 2037 // lea.sl %Result, TargetBB@gotoff_hi(%Tmp2, %s15) ; %s15 is GOT
 2038 BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
 2039 .addImm(0)
 2040 .addImm(0)
 2042 BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
 2043 .addReg(Tmp1, getKillRegState(true))
 2044 .addImm(M0(32));
 2045 BuildMI(MBB, I, DL, TII->get(VE::LEASLrri), Result)
 2046 .addReg(VE::SX15)
 2047 .addReg(Tmp2, getKillRegState(true))
 2049 } else {
 2050 // Create following instructions for non-PIC code.
 2051 // lea %Tmp1, TargetBB@lo
 2052 // and %Tmp2, %Tmp1, (32)0
 2053 // lea.sl %Result, TargetBB@hi(%Tmp2)
 2054 BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
 2055 .addImm(0)
 2056 .addImm(0)
 2057 .addMBB(TargetBB, VEMCExpr::VK_VE_LO32);
 2058 BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
 2059 .addReg(Tmp1, getKillRegState(true))
 2060 .addImm(M0(32));
 2061 BuildMI(MBB, I, DL, TII->get(VE::LEASLrii), Result)
 2062 .addReg(Tmp2, getKillRegState(true))
 2063 .addImm(0)
 2064 .addMBB(TargetBB, VEMCExpr::VK_VE_HI32);
 2065 }
 2066 return Result;
 2067}
2068
// Materialize the address of external symbol `Symbol` into a fresh I64
// virtual register and return that register.  Four code sequences are
// emitted depending on (isPositionIndependent, IsLocal, IsCall):
//   - PIC call to a non-local symbol: GETFUNPLT pseudo (expanded later in
//     VEAsmPrinter.cpp, using IC and the magic offset -24),
//   - PIC local symbol: GOT-offset lea/and/lea.sl via %s15,
//   - PIC non-local symbol: address loaded from the GOT entry,
//   - non-PIC: absolute lea/and/lea.sl.
// NOTE(review): the signature opener and several BuildMI operand lines
// (the @gotoff_lo/@got_lo/@gotoff_hi symbol operands) are absent from this
// extract — confirm against the upstream file.
2071 StringRef Symbol, const DebugLoc &DL,
2072 bool IsLocal = false,
2073 bool IsCall = false) const {
2076 const VEInstrInfo *TII = Subtarget->getInstrInfo();
2077
2078 const TargetRegisterClass *RC = &VE::I64RegClass;
2079 Register Result = MRI.createVirtualRegister(RC);
2080
2081 if (isPositionIndependent()) {
2082 if (IsCall && !IsLocal) {
2083 // Create following instructions for non-local linkage PIC code function
2084 // calls. These instructions use IC and the magic number -24, so we expand
2085 // them in VEAsmPrinter.cpp from the GETFUNPLT pseudo instruction.
2086 // lea %Reg, Symbol@plt_lo(-24)
2087 // and %Reg, %Reg, (32)0
2088 // sic %s16
2089 // lea.sl %Result, Symbol@plt_hi(%Reg, %s16) ; %s16 is PLT
2090 BuildMI(MBB, I, DL, TII->get(VE::GETFUNPLT), Result)
2091 .addExternalSymbol("abort");
2092 } else if (IsLocal) {
2093 Register Tmp1 = MRI.createVirtualRegister(RC);
2094 Register Tmp2 = MRI.createVirtualRegister(RC);
2095 // Create following instructions for local linkage PIC code.
2096 // lea %Tmp1, Symbol@gotoff_lo
2097 // and %Tmp2, %Tmp1, (32)0
2098 // lea.sl %Result, Symbol@gotoff_hi(%Tmp2, %s15) ; %s15 is GOT
2099 BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
2100 .addImm(0)
2101 .addImm(0)
// NOTE(review): the @gotoff_lo symbol operand line is missing here.
2103 BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
2104 .addReg(Tmp1, getKillRegState(true))
2105 .addImm(M0(32));
2106 BuildMI(MBB, I, DL, TII->get(VE::LEASLrri), Result)
2107 .addReg(VE::SX15)
2108 .addReg(Tmp2, getKillRegState(true))
// NOTE(review): the @gotoff_hi symbol operand line is missing here.
2110 } else {
2111 Register Tmp1 = MRI.createVirtualRegister(RC);
2112 Register Tmp2 = MRI.createVirtualRegister(RC);
2113 // Create following instructions for not local linkage PIC code.
2114 // lea %Tmp1, Symbol@got_lo
2115 // and %Tmp2, %Tmp1, (32)0
2116 // lea.sl %Tmp3, Symbol@gotoff_hi(%Tmp2, %s15) ; %s15 is GOT
2117 // ld %Result, 0(%Tmp3)
2118 Register Tmp3 = MRI.createVirtualRegister(RC);
2119 BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
2120 .addImm(0)
2121 .addImm(0)
// NOTE(review): the @got_lo symbol operand line is missing here.
2123 BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
2124 .addReg(Tmp1, getKillRegState(true))
2125 .addImm(M0(32));
2126 BuildMI(MBB, I, DL, TII->get(VE::LEASLrri), Tmp3)
2127 .addReg(VE::SX15)
2128 .addReg(Tmp2, getKillRegState(true))
// NOTE(review): the @got_hi symbol operand line is missing here.
2130 BuildMI(MBB, I, DL, TII->get(VE::LDrii), Result)
2131 .addReg(Tmp3, getKillRegState(true))
2132 .addImm(0)
2133 .addImm(0);
2134 }
2135 } else {
2136 Register Tmp1 = MRI.createVirtualRegister(RC);
2137 Register Tmp2 = MRI.createVirtualRegister(RC);
2138 // Create following instructions for non-PIC code.
2139 // lea %Tmp1, Symbol@lo
2140 // and %Tmp2, %Tmp1, (32)0
2141 // lea.sl %Result, Symbol@hi(%Tmp2)
2142 BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
2143 .addImm(0)
2144 .addImm(0)
2145 .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_LO32);
2146 BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
2147 .addReg(Tmp1, getKillRegState(true))
2148 .addImm(M0(32));
2149 BuildMI(MBB, I, DL, TII->get(VE::LEASLrii), Result)
2150 .addReg(Tmp2, getKillRegState(true))
2151 .addImm(0)
2152 .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_HI32);
2153 }
2154 return Result;
2155}
2156
// Store the address of `DispatchBB` into the setjmp buffer slot at
// FI+Offset (jmpbuf[1]); a later longjmp/throw resumes execution at that
// instruction counter (IC).
// NOTE(review): the signature opener and the right-hand side of the
// `LabelReg` initialization (presumably a prepareMBB(...) call) are absent
// from this extract — confirm against upstream.
2159 MachineBasicBlock *DispatchBB,
2160 int FI, int Offset) const {
2161 DebugLoc DL = MI.getDebugLoc();
2162 const VEInstrInfo *TII = Subtarget->getInstrInfo();
2163
2164 Register LabelReg =
2166
// Store the address of DispatchBB into the given jmpbuf[1]; it holds the
// next IC referenced by a later longjmp (throw).
2169 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(VE::STrii));
2170 addFrameReference(MIB, FI, Offset); // jmpbuf[1]
2171 MIB.addReg(LabelReg, getKillRegState(true));
2172}
2173
// Expand the EH_SjLj_SetJmp pseudo.  Splits the current block into
// ThisMBB / MainMBB / SinkMBB and appends RestoreMBB:
//   ThisMBB stores BP (buf[3], iff BP is used) and the RestoreMBB address
//   (buf[1]) into the jump buffer and emits EH_SjLj_Setup;
//   MainMBB yields 0 (direct setjmp return);
//   RestoreMBB yields 1 (longjmp return) and restores BP;
//   SinkMBB merges both values with a PHI into the original destination.
// NOTE(review): the signature opener and a few declaration/operand lines
// (e.g. the MachineRegisterInfo &MRI declaration around original lines
// 2181/2184, and 2232/2236/2287) are absent from this extract — confirm
// against the upstream file.
2176 MachineBasicBlock *MBB) const {
2177 DebugLoc DL = MI.getDebugLoc();
2178 MachineFunction *MF = MBB->getParent();
2179 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2180 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
2182
2183 const BasicBlock *BB = MBB->getBasicBlock();
2185
2186 // Memory Reference.
2187 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
2188 MI.memoperands_end());
2189 Register BufReg = MI.getOperand(1).getReg();
2190
2191 Register DstReg;
2192
2193 DstReg = MI.getOperand(0).getReg();
2194 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
2195 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
2196 (void)TRI;
2197 Register MainDestReg = MRI.createVirtualRegister(RC);
2198 Register RestoreDestReg = MRI.createVirtualRegister(RC);
2199
2200 // For `v = call @llvm.eh.sjlj.setjmp(buf)`, we generate following
2201 // instructions. SP/FP must be saved in jmpbuf before `llvm.eh.sjlj.setjmp`.
2202 //
2203 // ThisMBB:
2204 // buf[3] = %s17 iff %s17 is used as BP
2205 // buf[1] = RestoreMBB as IC after longjmp
2206 // # SjLjSetup RestoreMBB
2207 //
2208 // MainMBB:
2209 // v_main = 0
2210 //
2211 // SinkMBB:
2212 // v = phi(v_main, MainMBB, v_restore, RestoreMBB)
2213 // ...
2214 //
2215 // RestoreMBB:
2216 // %s17 = buf[3] = iff %s17 is used as BP
2217 // v_restore = 1
2218 // goto SinkMBB
2219
2220 MachineBasicBlock *ThisMBB = MBB;
2221 MachineBasicBlock *MainMBB = MF->CreateMachineBasicBlock(BB);
2222 MachineBasicBlock *SinkMBB = MF->CreateMachineBasicBlock(BB);
2223 MachineBasicBlock *RestoreMBB = MF->CreateMachineBasicBlock(BB);
2224 MF->insert(I, MainMBB);
2225 MF->insert(I, SinkMBB);
2226 MF->push_back(RestoreMBB);
2227 RestoreMBB->setMachineBlockAddressTaken();
2228
2229 // Transfer the remainder of BB and its successor edges to SinkMBB.
2230 SinkMBB->splice(SinkMBB->begin(), MBB,
2231 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
2233
2234 // ThisMBB:
2235 Register LabelReg =
2237
2238 // Store BP in buf[3] iff this function is using BP.
2239 const VEFrameLowering *TFI = Subtarget->getFrameLowering();
2240 if (TFI->hasBP(*MF)) {
2241 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(VE::STrii));
2242 MIB.addReg(BufReg);
2243 MIB.addImm(0);
2244 MIB.addImm(24);
2245 MIB.addReg(VE::SX17);
2246 MIB.setMemRefs(MMOs);
2247 }
2248
2249 // Store IP in buf[1].
2250 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(VE::STrii));
2251 MIB.add(MI.getOperand(1)); // we can preserve the kill flags here.
2252 MIB.addImm(0);
2253 MIB.addImm(8);
2254 MIB.addReg(LabelReg, getKillRegState(true));
2255 MIB.setMemRefs(MMOs);
2256
2257 // SP/FP are already stored in jmpbuf before `llvm.eh.sjlj.setjmp`.
2258
2259 // Insert setup.
2260 MIB =
2261 BuildMI(*ThisMBB, MI, DL, TII->get(VE::EH_SjLj_Setup)).addMBB(RestoreMBB);
2262
2263 const VERegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2264 MIB.addRegMask(RegInfo->getNoPreservedMask());
2265 ThisMBB->addSuccessor(MainMBB);
2266 ThisMBB->addSuccessor(RestoreMBB);
2267
2268 // MainMBB:
2269 BuildMI(MainMBB, DL, TII->get(VE::LEAzii), MainDestReg)
2270 .addImm(0)
2271 .addImm(0)
2272 .addImm(0);
2273 MainMBB->addSuccessor(SinkMBB);
2274
2275 // SinkMBB:
2276 BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(VE::PHI), DstReg)
2277 .addReg(MainDestReg)
2278 .addMBB(MainMBB)
2279 .addReg(RestoreDestReg)
2280 .addMBB(RestoreMBB);
2281
2282 // RestoreMBB:
2283 // Restore BP from buf[3] iff this function is using BP. The address of
2284 // buf is in SX10.
2285 // FIXME: Better to not use SX10 here
2286 if (TFI->hasBP(*MF)) {
2288 BuildMI(RestoreMBB, DL, TII->get(VE::LDrii), VE::SX17);
2289 MIB.addReg(VE::SX10);
2290 MIB.addImm(0);
2291 MIB.addImm(24);
2292 MIB.setMemRefs(MMOs);
2293 }
2294 BuildMI(RestoreMBB, DL, TII->get(VE::LEAzii), RestoreDestReg)
2295 .addImm(0)
2296 .addImm(0)
2297 .addImm(1);
2298 BuildMI(RestoreMBB, DL, TII->get(VE::BRCFLa_t)).addMBB(SinkMBB);
2299 RestoreMBB->addSuccessor(SinkMBB);
2300
2301 MI.eraseFromParent();
2302 return SinkMBB;
2303}
2304
// Expand the EH_SjLj_LongJmp pseudo: reload FP (buf[0]), the target IC
// (buf[1]) and SP (buf[2]) from the jump buffer, stash the buffer address
// in SX10 for RestoreMBB (see emitEHSjLjSetJmp), then jump to the IC.
// NOTE(review): the signature opener and two declaration lines (around
// original lines 2311 and 2323, presumably MachineRegisterInfo &MRI and
// MachineInstrBuilder MIB) are absent from this extract — confirm against
// the upstream file.
2307 MachineBasicBlock *MBB) const {
2308 DebugLoc DL = MI.getDebugLoc();
2309 MachineFunction *MF = MBB->getParent();
2310 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2312
2313 // Memory Reference.
2314 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
2315 MI.memoperands_end());
2316 Register BufReg = MI.getOperand(0).getReg();
2317
2318 Register Tmp = MRI.createVirtualRegister(&VE::I64RegClass);
2319 // Since FP is only updated here but NOT referenced, it's treated as GPR.
2320 Register FP = VE::SX9;
2321 Register SP = VE::SX11;
2322
2324
2325 MachineBasicBlock *ThisMBB = MBB;
2326
2327 // For `call @llvm.eh.sjlj.longjmp(buf)`, we generate following instructions.
2328 //
2329 // ThisMBB:
2330 // %fp = load buf[0]
2331 // %jmp = load buf[1]
2332 // %s10 = buf ; Store an address of buf to SX10 for RestoreMBB
2333 // %sp = load buf[2] ; generated by llvm.eh.sjlj.setjmp.
2334 // jmp %jmp
2335
2336 // Reload FP.
2337 MIB = BuildMI(*ThisMBB, MI, DL, TII->get(VE::LDrii), FP);
2338 MIB.addReg(BufReg);
2339 MIB.addImm(0);
2340 MIB.addImm(0);
2341 MIB.setMemRefs(MMOs);
2342
2343 // Reload IP.
2344 MIB = BuildMI(*ThisMBB, MI, DL, TII->get(VE::LDrii), Tmp);
2345 MIB.addReg(BufReg);
2346 MIB.addImm(0);
2347 MIB.addImm(8);
2348 MIB.setMemRefs(MMOs);
2349
2350 // Copy BufReg to SX10 for later use in setjmp.
2351 // FIXME: Better to not use SX10 here
2352 BuildMI(*ThisMBB, MI, DL, TII->get(VE::ORri), VE::SX10)
2353 .addReg(BufReg)
2354 .addImm(0);
2355
2356 // Reload SP.
2357 MIB = BuildMI(*ThisMBB, MI, DL, TII->get(VE::LDrii), SP);
2358 MIB.add(MI.getOperand(0)); // we can preserve the kill flags here.
2359 MIB.addImm(0);
2360 MIB.addImm(16);
2361 MIB.setMemRefs(MMOs);
2362
2363 // Jump.
2364 BuildMI(*ThisMBB, MI, DL, TII->get(VE::BCFLari_t))
2365 .addReg(Tmp, getKillRegState(true))
2366 .addImm(0);
2367
2368 MI.eraseFromParent();
2369 return ThisMBB;
2370}
2371
// Expand the EH_SjLj_Setup_Dispatch pseudo: build the SjLj exception
// dispatch machinery (DispatchBB loads the call-site index and bounds-checks
// it, TrapBB calls abort, DispContBB indexes a jump table of landing pads
// and jumps there), registers DispatchBB's address in the function context,
// rewires every invoke block's EH edge to DispatchBB, and clears the
// EH-pad flag on the former landing pads.
// NOTE(review): roughly twenty lines of this function are absent from this
// extract (e.g. the signature opener, MachineRegisterInfo/DenseMap/
// SmallPtrSet declarations, the TrapBB creation line, the jump-table-info
// line, several BuildMI operand lines, and the case labels of the
// jump-table-encoding switch) — confirm against the upstream file.
2374 MachineBasicBlock *BB) const {
2375 DebugLoc DL = MI.getDebugLoc();
2376 MachineFunction *MF = BB->getParent();
2377 MachineFrameInfo &MFI = MF->getFrameInfo();
2379 const VEInstrInfo *TII = Subtarget->getInstrInfo();
2380 int FI = MFI.getFunctionContextIndex();
2381
2382 // Get a mapping of the call site numbers to all of the landing pads they're
2383 // associated with.
2385 unsigned MaxCSNum = 0;
2386 for (auto &MBB : *MF) {
2387 if (!MBB.isEHPad())
2388 continue;
2389
2390 MCSymbol *Sym = nullptr;
2391 for (const auto &MI : MBB) {
2392 if (MI.isDebugInstr())
2393 continue;
2394
2395 assert(MI.isEHLabel() && "expected EH_LABEL");
2396 Sym = MI.getOperand(0).getMCSymbol();
2397 break;
2398 }
2399
2400 if (!MF->hasCallSiteLandingPad(Sym))
2401 continue;
2402
2403 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
2404 CallSiteNumToLPad[CSI].push_back(&MBB);
2405 MaxCSNum = std::max(MaxCSNum, CSI);
2406 }
2407 }
2408
2409 // Get an ordered list of the machine basic blocks for the jump table.
2410 std::vector<MachineBasicBlock *> LPadList;
2412 LPadList.reserve(CallSiteNumToLPad.size());
2413
2414 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
2415 for (auto &LP : CallSiteNumToLPad[CSI]) {
2416 LPadList.push_back(LP);
2417 InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
2418 }
2419 }
2420
2421 assert(!LPadList.empty() &&
2422 "No landing pad destinations for the dispatch jump table!");
2423
2424 // The %fn_context is allocated like below (from --print-after=sjljehprepare):
2425 // %fn_context = alloca { i8*, i64, [4 x i64], i8*, i8*, [5 x i8*] }
2426 //
2427 // This `[5 x i8*]` is jmpbuf, so jmpbuf[1] is FI+72.
2428 // First `i64` is callsite, so callsite is FI+8.
2429 static const int OffsetIC = 72;
2430 static const int OffsetCS = 8;
2431
2432 // Create the MBBs for the dispatch code like following:
2433 //
2434 // ThisMBB:
2435 // Prepare DispatchBB address and store it to buf[1].
2436 // ...
2437 //
2438 // DispatchBB:
2439 // %s15 = GETGOT iff isPositionIndependent
2440 // %callsite = load callsite
2441 // brgt.l.t #size of callsites, %callsite, DispContBB
2442 //
2443 // TrapBB:
2444 // Call abort.
2445 //
2446 // DispContBB:
2447 // %breg = address of jump table
2448 // %pc = load and calculate next pc from %breg and %callsite
2449 // jmp %pc
2450
2451 // Shove the dispatch's address into the return slot in the function context.
2452 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
2453 DispatchBB->setIsEHPad(true);
2454
2455 // TrapBB causes a trap like `assert(0)`.
2457 DispatchBB->addSuccessor(TrapBB);
2458
2459 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
2460 DispatchBB->addSuccessor(DispContBB);
2461
2462 // Insert MBBs.
2463 MF->push_back(DispatchBB);
2464 MF->push_back(DispContBB);
2465 MF->push_back(TrapBB);
2466
2467 // Insert code to call abort in the TrapBB.
2468 Register Abort = prepareSymbol(*TrapBB, TrapBB->end(), "abort", DL,
2469 /* Local */ false, /* Call */ true);
2470 BuildMI(TrapBB, DL, TII->get(VE::BSICrii), VE::SX10)
2471 .addReg(Abort, getKillRegState(true))
2472 .addImm(0)
2473 .addImm(0);
2474
2475 // Insert code into the entry block that creates and registers the function
2476 // context.
2477 setupEntryBlockForSjLj(MI, BB, DispatchBB, FI, OffsetIC);
2478
2479 // Create the jump table and associated information
2480 unsigned JTE = getJumpTableEncoding();
2482 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
2483
2484 const VERegisterInfo &RI = TII->getRegisterInfo();
2485 // Add a register mask with no preserved registers. This results in all
2486 // registers being marked as clobbered.
2487 BuildMI(DispatchBB, DL, TII->get(VE::NOP))
2489
2490 if (isPositionIndependent()) {
2491 // Force to generate GETGOT, since current implementation doesn't store GOT
2492 // register.
2493 BuildMI(DispatchBB, DL, TII->get(VE::GETGOT), VE::SX15);
2494 }
2495
2496 // IReg is used as an index in a memory operand and therefore can't be SP
2497 const TargetRegisterClass *RC = &VE::I64RegClass;
2498 Register IReg = MRI.createVirtualRegister(RC);
2499 addFrameReference(BuildMI(DispatchBB, DL, TII->get(VE::LDLZXrii), IReg), FI,
2500 OffsetCS);
2501 if (LPadList.size() < 64) {
2502 BuildMI(DispatchBB, DL, TII->get(VE::BRCFLir_t))
2504 .addImm(LPadList.size())
2505 .addReg(IReg)
2506 .addMBB(TrapBB);
2507 } else {
2508 assert(LPadList.size() <= 0x7FFFFFFF && "Too large Landing Pad!");
2509 Register TmpReg = MRI.createVirtualRegister(RC);
2510 BuildMI(DispatchBB, DL, TII->get(VE::LEAzii), TmpReg)
2511 .addImm(0)
2512 .addImm(0)
2513 .addImm(LPadList.size());
2514 BuildMI(DispatchBB, DL, TII->get(VE::BRCFLrr_t))
2516 .addReg(TmpReg, getKillRegState(true))
2517 .addReg(IReg)
2518 .addMBB(TrapBB);
2519 }
2520
2521 Register BReg = MRI.createVirtualRegister(RC);
2522 Register Tmp1 = MRI.createVirtualRegister(RC);
2523 Register Tmp2 = MRI.createVirtualRegister(RC);
2524
2525 if (isPositionIndependent()) {
2526 // Create following instructions for local linkage PIC code.
2527 // lea %Tmp1, .LJTI0_0@gotoff_lo
2528 // and %Tmp2, %Tmp1, (32)0
2529 // lea.sl %BReg, .LJTI0_0@gotoff_hi(%Tmp2, %s15) ; %s15 is GOT
2530 BuildMI(DispContBB, DL, TII->get(VE::LEAzii), Tmp1)
2531 .addImm(0)
2532 .addImm(0)
2534 BuildMI(DispContBB, DL, TII->get(VE::ANDrm), Tmp2)
2535 .addReg(Tmp1, getKillRegState(true))
2536 .addImm(M0(32));
2537 BuildMI(DispContBB, DL, TII->get(VE::LEASLrri), BReg)
2538 .addReg(VE::SX15)
2539 .addReg(Tmp2, getKillRegState(true))
2541 } else {
2542 // Create following instructions for non-PIC code.
2543 // lea %Tmp1, .LJTI0_0@lo
2544 // and %Tmp2, %Tmp1, (32)0
2545 // lea.sl %BReg, .LJTI0_0@hi(%Tmp2)
2546 BuildMI(DispContBB, DL, TII->get(VE::LEAzii), Tmp1)
2547 .addImm(0)
2548 .addImm(0)
2550 BuildMI(DispContBB, DL, TII->get(VE::ANDrm), Tmp2)
2551 .addReg(Tmp1, getKillRegState(true))
2552 .addImm(M0(32));
2553 BuildMI(DispContBB, DL, TII->get(VE::LEASLrii), BReg)
2554 .addReg(Tmp2, getKillRegState(true))
2555 .addImm(0)
2557 }
2558
2559 switch (JTE) {
2561 // Generate simple block address code for no-PIC model.
2562 // sll %Tmp1, %IReg, 3
2563 // lds %TReg, 0(%Tmp1, %BReg)
2564 // bcfla %TReg
2565
2566 Register TReg = MRI.createVirtualRegister(RC);
2567 Register Tmp1 = MRI.createVirtualRegister(RC);
2568
2569 BuildMI(DispContBB, DL, TII->get(VE::SLLri), Tmp1)
2570 .addReg(IReg, getKillRegState(true))
2571 .addImm(3);
2572 BuildMI(DispContBB, DL, TII->get(VE::LDrri), TReg)
2573 .addReg(BReg, getKillRegState(true))
2574 .addReg(Tmp1, getKillRegState(true))
2575 .addImm(0);
2576 BuildMI(DispContBB, DL, TII->get(VE::BCFLari_t))
2577 .addReg(TReg, getKillRegState(true))
2578 .addImm(0);
2579 break;
2580 }
2582 // Generate block address code using differences from the function pointer
2583 // for PIC model.
2584 // sll %Tmp1, %IReg, 2
2585 // ldl.zx %OReg, 0(%Tmp1, %BReg)
2586 // Prepare function address in BReg2.
2587 // adds.l %TReg, %BReg2, %OReg
2588 // bcfla %TReg
2589
2591 Register OReg = MRI.createVirtualRegister(RC);
2592 Register TReg = MRI.createVirtualRegister(RC);
2593 Register Tmp1 = MRI.createVirtualRegister(RC);
2594
2595 BuildMI(DispContBB, DL, TII->get(VE::SLLri), Tmp1)
2596 .addReg(IReg, getKillRegState(true))
2597 .addImm(2);
2598 BuildMI(DispContBB, DL, TII->get(VE::LDLZXrri), OReg)
2599 .addReg(BReg, getKillRegState(true))
2600 .addReg(Tmp1, getKillRegState(true))
2601 .addImm(0);
2602 Register BReg2 =
2603 prepareSymbol(*DispContBB, DispContBB->end(),
2604 DispContBB->getParent()->getName(), DL, /* Local */ true);
2605 BuildMI(DispContBB, DL, TII->get(VE::ADDSLrr), TReg)
2606 .addReg(OReg, getKillRegState(true))
2607 .addReg(BReg2, getKillRegState(true));
2608 BuildMI(DispContBB, DL, TII->get(VE::BCFLari_t))
2609 .addReg(TReg, getKillRegState(true))
2610 .addImm(0);
2611 break;
2612 }
2613 default:
2614 llvm_unreachable("Unexpected jump table encoding");
2615 }
2616
2617 // Add the jump table entries as successors to the MBB.
2619 for (auto &LP : LPadList)
2620 if (SeenMBBs.insert(LP).second)
2621 DispContBB->addSuccessor(LP);
2622
2623 // N.B. the order the invoke BBs are processed in doesn't matter here.
2625 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
2626 for (MachineBasicBlock *MBB : InvokeBBs) {
2627 // Remove the landing pad successor from the invoke block and replace it
2628 // with the new dispatch block.
2629 // Keep a copy of Successors since it's modified inside the loop.
2631 MBB->succ_rend());
2632 // FIXME: Avoid quadratic complexity.
2633 for (auto *MBBS : Successors) {
2634 if (MBBS->isEHPad()) {
2635 MBB->removeSuccessor(MBBS);
2636 MBBLPads.push_back(MBBS);
2637 }
2638 }
2639
2640 MBB->addSuccessor(DispatchBB);
2641
2642 // Find the invoke call and mark all of the callee-saved registers as
2643 // 'implicit defined' so that they're spilled. This prevents code from
2644 // moving instructions to before the EH block, where they will never be
2645 // executed.
2646 for (auto &II : reverse(*MBB)) {
2647 if (!II.isCall())
2648 continue;
2649
2651 for (auto &MOp : II.operands())
2652 if (MOp.isReg())
2653 DefRegs[MOp.getReg()] = true;
2654
2655 MachineInstrBuilder MIB(*MF, &II);
2656 for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
2657 Register Reg = SavedRegs[RI];
2658 if (!DefRegs[Reg])
2660 }
2661
2662 break;
2663 }
2664 }
2665
2666 // Mark all former landing pads as non-landing pads. The dispatch is the only
2667 // landing pad now.
2668 for (auto &LP : MBBLPads)
2669 LP->setIsEHPad(false);
2670
2671 // The instruction is gone now.
2672 MI.eraseFromParent();
2673 return BB;
2674}
2675
// Custom-inserter hook: dispatch the three SjLj exception-handling pseudo
// instructions to their dedicated expansion routines above.
// NOTE(review): the signature opener (MachineBasicBlock *
// VETargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,) is
// absent from this extract.
2678 MachineBasicBlock *BB) const {
2679 switch (MI.getOpcode()) {
2680 default:
2681 llvm_unreachable("Unknown Custom Instruction!");
2682 case VE::EH_SjLj_LongJmp:
2683 return emitEHSjLjLongJmp(MI, BB);
2684 case VE::EH_SjLj_SetJmp:
2685 return emitEHSjLjSetJmp(MI, BB);
2686 case VE::EH_SjLj_Setup_Dispatch:
2687 return emitSjLjDispatchBlock(MI, BB);
2688 }
2689}
2690
2691static bool isSimm7(SDValue V) {
2692 EVT VT = V.getValueType();
2693 if (VT.isVector())
2694 return false;
2695
2696 if (VT.isInteger()) {
2697 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(V))
2698 return isInt<7>(C->getSExtValue());
2699 } else if (VT.isFloatingPoint()) {
2700 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(V)) {
2701 if (VT == MVT::f32 || VT == MVT::f64) {
2702 const APInt &Imm = C->getValueAPF().bitcastToAPInt();
2703 uint64_t Val = Imm.getSExtValue();
2704 if (Imm.getBitWidth() == 32)
2705 Val <<= 32; // Immediate value of float place at higher bits on VE.
2706 return isInt<7>(Val);
2707 }
2708 }
2709 }
2710 return false;
2711}
2712
2713static bool isMImm(SDValue V) {
2714 EVT VT = V.getValueType();
2715 if (VT.isVector())
2716 return false;
2717
2718 if (VT.isInteger()) {
2719 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(V))
2720 return isMImmVal(getImmVal(C));
2721 } else if (VT.isFloatingPoint()) {
2722 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(V)) {
2723 if (VT == MVT::f32) {
2724 // Float value places at higher bits, so ignore lower 32 bits.
2725 return isMImm32Val(getFpImmVal(C) >> 32);
2726 } else if (VT == MVT::f64) {
2727 return isMImmVal(getFpImmVal(C));
2728 }
2729 }
2730 }
2731 return false;
2732}
2733
2734static unsigned decideComp(EVT SrcVT, ISD::CondCode CC) {
2735 if (SrcVT.isFloatingPoint()) {
2736 if (SrcVT == MVT::f128)
2737 return VEISD::CMPQ;
2738 return VEISD::CMPF;
2739 }
2740 return isSignedIntSetCC(CC) ? VEISD::CMPI : VEISD::CMPU;
2741}
2742
2743static EVT decideCompType(EVT SrcVT) {
2744 if (SrcVT == MVT::f128)
2745 return MVT::f64;
2746 return SrcVT;
2747}
2748
// Return true when a compare-with-zero instruction can be omitted and the
// raw LHS value used directly as the condition input (see
// generateComparison below).
// NOTE(review): the signature opener (static bool
// safeWithoutCompWithNull(EVT SrcVT, ISD::CondCode CC,) is absent from
// this extract.
2750 bool WithCMov) {
2751 if (SrcVT.isFloatingPoint()) {
2752 // For the case of floating point setcc, only unordered comparison
2753 // or general comparison with -enable-no-nans-fp-math option reach
2754 // here, so it is safe even if values are NaN. Only f128 is not
2755 // safe since VE uses the f64 result of an f128 comparison.
2756 return SrcVT != MVT::f128;
2757 }
2758 if (isIntEqualitySetCC(CC)) {
2759 // For the case of equal or not equal, it is safe without comparison with 0.
2760 return true;
2761 }
2762 if (WithCMov) {
2763 // For the case of integer setcc with cmov, all signed comparison with 0
2764 // are safe.
2765 return isSignedIntSetCC(CC);
2766 }
2767 // For the case of integer setcc, only signed 64 bits comparison is safe.
2768 // For unsigned, "CMPU 0x80000000, 0" has to be greater than 0, but it becomes
2769 // less than 0 without CMPU. For 32 bits, the other half of the 32 bits is
2770 // unconditional, so it is not safe either without CMPI.
2771 return isSignedIntSetCC(CC) && SrcVT == MVT::i64;
2772}
2773
// Build the comparison node feeding a CMOV/conditional: returns LHS
// unchanged when the compare-with-zero can be elided (see
// safeWithoutCompWithNull), otherwise emits the VEISD compare node chosen
// by decideComp with the result type from decideCompType.
// NOTE(review): the signature opener (static SDValue
// generateComparison(EVT VT, SDValue LHS, SDValue RHS,) and the final
// condition of the `if` (presumably a null-RHS check on original line
// 2781) are absent from this extract — confirm against upstream.
2775 ISD::CondCode CC, bool WithCMov,
2776 const SDLoc &DL, SelectionDAG &DAG) {
2777 // Compare values. If RHS is 0 and it is safe to calculate without
2778 // comparison, we don't generate an instruction for comparison.
2779 EVT CompVT = decideCompType(VT);
2780 if (CompVT == VT && safeWithoutCompWithNull(VT, CC, WithCMov) &&
2782 return LHS;
2783 }
2784 return DAG.getNode(decideComp(VT, CC), DL, CompVT, LHS, RHS);
2785}
2786
// DAG combine for scalar ISD::SELECT after legalization: lowers it to a
// VEISD::CMOV, swapping the True/False operands (and inverting the
// condition) when only False holds an M-imm value, since VE's conditional
// move accepts an M-imm only in the True position.
// NOTE(review): the signature opener and the initialization of `CC`
// (original line 2791) are absent from this extract — confirm against
// upstream.
2788 DAGCombinerInfo &DCI) const {
2789 assert(N->getOpcode() == ISD::SELECT &&
2790 "Should be called with a SELECT node");
2792 SDValue Cond = N->getOperand(0);
2793 SDValue True = N->getOperand(1);
2794 SDValue False = N->getOperand(2);
2795
2796 // We handle only scalar SELECT.
2797 EVT VT = N->getValueType(0);
2798 if (VT.isVector())
2799 return SDValue();
2800
2801 // Perform combineSelect after legalizing the DAG.
2802 if (!DCI.isAfterLegalizeDAG())
2803 return SDValue();
2804
2805 EVT VT0 = Cond.getValueType();
2806 if (isMImm(True)) {
2807 // VE's condition move can handle MImm in True clause, so nothing to do.
2808 } else if (isMImm(False)) {
2809 // VE's condition move can handle MImm in True clause, so swap True and
2810 // False clauses if False has MImm value. And, update condition code.
2811 std::swap(True, False);
2812 CC = getSetCCInverse(CC, VT0);
2813 }
2814
2815 SDLoc DL(N);
2816 SelectionDAG &DAG = DCI.DAG;
2817 VECC::CondCode VECCVal;
2818 if (VT0.isFloatingPoint()) {
2819 VECCVal = fpCondCode2Fcc(CC);
2820 } else {
2821 VECCVal = intCondCode2Icc(CC);
2822 }
2823 SDValue Ops[] = {Cond, True, False,
2824 DAG.getConstant(VECCVal, DL, MVT::i32)};
2825 return DAG.getNode(VEISD::CMOV, DL, VT, Ops);
2826}
2827
// DAG combine for scalar ISD::SELECT_CC after legalization: canonicalizes
// the operands for VE's immediate encodings (simm7 on the LHS of a compare,
// M-imm on the RHS and in the True slot of a conditional move), builds the
// comparison via generateComparison, and emits a VEISD::CMOV.
// NOTE(review): the signature opener (SDValue
// VETargetLowering::combineSelectCC(SDNode *N,) is absent from this
// extract.
2829 DAGCombinerInfo &DCI) const {
2830 assert(N->getOpcode() == ISD::SELECT_CC &&
2831 "Should be called with a SELECT_CC node");
2832 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
2833 SDValue LHS = N->getOperand(0);
2834 SDValue RHS = N->getOperand(1);
2835 SDValue True = N->getOperand(2);
2836 SDValue False = N->getOperand(3);
2837
2838 // We handle only scalar SELECT_CC.
2839 EVT VT = N->getValueType(0);
2840 if (VT.isVector())
2841 return SDValue();
2842
2843 // Perform combineSelectCC after legalizing the DAG.
2844 if (!DCI.isAfterLegalizeDAG())
2845 return SDValue();
2846
2847 // We handle only i32/i64/f32/f64/f128 comparisons.
2848 EVT LHSVT = LHS.getValueType();
2849 assert(LHSVT == RHS.getValueType());
2850 switch (LHSVT.getSimpleVT().SimpleTy) {
2851 case MVT::i32:
2852 case MVT::i64:
2853 case MVT::f32:
2854 case MVT::f64:
2855 case MVT::f128:
2856 break;
2857 default:
2858 // Return SDValue to let llvm handle other types.
2859 return SDValue();
2860 }
2861
2862 if (isMImm(RHS)) {
2863 // VE's comparison can handle MImm in RHS, so nothing to do.
2864 } else if (isSimm7(RHS)) {
2865 // VE's comparison can handle Simm7 in LHS, so swap LHS and RHS, and
2866 // update condition code.
2867 std::swap(LHS, RHS);
2868 CC = getSetCCSwappedOperands(CC);
2869 }
2870 if (isMImm(True)) {
2871 // VE's condition move can handle MImm in True clause, so nothing to do.
2872 } else if (isMImm(False)) {
2873 // VE's condition move can handle MImm in True clause, so swap True and
2874 // False clauses if False has MImm value. And, update condition code.
2875 std::swap(True, False);
2876 CC = getSetCCInverse(CC, LHSVT);
2877 }
2878
2879 SDLoc DL(N);
2880 SelectionDAG &DAG = DCI.DAG;
2881
2882 bool WithCMov = true;
2883 SDValue CompNode = generateComparison(LHSVT, LHS, RHS, CC, WithCMov, DL, DAG);
2884
2885 VECC::CondCode VECCVal;
2886 if (LHSVT.isFloatingPoint()) {
2887 VECCVal = fpCondCode2Fcc(CC);
2888 } else {
2889 VECCVal = intCondCode2Icc(CC);
2890 }
2891 SDValue Ops[] = {CompNode, True, False,
2892 DAG.getConstant(VECCVal, DL, MVT::i32)};
2893 return DAG.getNode(VEISD::CMOV, DL, VT, Ops);
2894}
2895
2896static bool isI32InsnAllUses(const SDNode *User, const SDNode *N);
// Return true when `User` (a use of the i32 truncate `N`) treats its
// operand as a genuine 32-bit value, so the truncate can later be replaced
// by a plain sub-register extraction (see combineTRUNCATE).
// NOTE(review): one case label (original line 2916, between ISD::BITCAST
// and ISD::ATOMIC_SWAP) is absent from this extract — confirm against
// upstream.
2897static bool isI32Insn(const SDNode *User, const SDNode *N) {
2898 switch (User->getOpcode()) {
2899 default:
2900 return false;
2901 case ISD::ADD:
2902 case ISD::SUB:
2903 case ISD::MUL:
2904 case ISD::SDIV:
2905 case ISD::UDIV:
2906 case ISD::SETCC:
2907 case ISD::SMIN:
2908 case ISD::SMAX:
2909 case ISD::SHL:
2910 case ISD::SRA:
2911 case ISD::BSWAP:
2912 case ISD::SINT_TO_FP:
2913 case ISD::UINT_TO_FP:
2914 case ISD::BR_CC:
2915 case ISD::BITCAST:
2917 case ISD::ATOMIC_SWAP:
2918 case VEISD::CMPU:
2919 case VEISD::CMPI:
2920 return true;
2921 case ISD::SRL:
2922 if (N->getOperand(0).getOpcode() != ISD::SRL)
2923 return true;
2924 // (srl (trunc (srl ...))) may be optimized by combining srl, so
2925 // doesn't optimize trunc now.
2926 return false;
2927 case ISD::SELECT_CC:
2928 if (User->getOperand(2).getNode() != N &&
2929 User->getOperand(3).getNode() != N)
2930 return true;
2931 return isI32InsnAllUses(User, N);
2932 case VEISD::CMOV:
2933 // CMOV in (cmov (trunc ...), true, false, int-comparison) is safe.
2934 // However, trunc in true or false clauses is not safe.
2935 if (User->getOperand(1).getNode() != N &&
2936 User->getOperand(2).getNode() != N &&
2937 isa<ConstantSDNode>(User->getOperand(3))) {
2938 VECC::CondCode VECCVal =
2939 static_cast<VECC::CondCode>(User->getConstantOperandVal(3));
2940 return isIntVECondCode(VECCVal);
2941 }
2942 [[fallthrough]];
2943 case ISD::AND:
2944 case ISD::OR:
2945 case ISD::XOR:
2946 case ISD::SELECT:
2947 case ISD::CopyToReg:
2948 // Check all use of selections, bit operations, and copies. If all of them
2949 // are safe, optimize truncate to extract_subreg.
2950 return isI32InsnAllUses(User, N);
2951 }
2952}
2953
2954static bool isI32InsnAllUses(const SDNode *User, const SDNode *N) {
2955 // Check all use of User node. If all of them are safe, optimize
2956 // truncate to extract_subreg.
2957 for (const SDNode *U : User->uses()) {
2958 switch (U->getOpcode()) {
2959 default:
2960 // If the use is an instruction which treats the source operand as i32,
2961 // it is safe to avoid truncate here.
2962 if (isI32Insn(U, N))
2963 continue;
2964 break;
2965 case ISD::ANY_EXTEND:
2966 case ISD::SIGN_EXTEND:
2967 case ISD::ZERO_EXTEND: {
2968 // Special optimizations to the combination of ext and trunc.
2969 // (ext ... (select ... (trunc ...))) is safe to avoid truncate here
2970 // since this truncate instruction clears higher 32 bits which is filled
2971 // by one of ext instructions later.
2972 assert(N->getValueType(0) == MVT::i32 &&
2973 "find truncate to not i32 integer");
2974 if (User->getOpcode() == ISD::SELECT_CC ||
2975 User->getOpcode() == ISD::SELECT || User->getOpcode() == VEISD::CMOV)
2976 continue;
2977 break;
2978 }
2979 }
2980 return false;
2981 }
2982 return true;
2983}
2984
2985// Optimize TRUNCATE in DAG combining. Optimizing it in CUSTOM lower is
2986// sometimes too early. Optimizing it in DAG pattern matching in
2987// VEInstrInfo.td is sometimes too late. So we do it here.
// Replaces (trunc i64 -> i32) with an EXTRACT_SUBREG of the i32
// sub-register when every user provably treats the value as 32-bit
// (see isI32Insn / isI32InsnAllUses).
// NOTE(review): the signature opener (SDValue
// VETargetLowering::combineTRUNCATE(SDNode *N,) is absent from this
// extract.
2989 DAGCombinerInfo &DCI) const {
2990 assert(N->getOpcode() == ISD::TRUNCATE &&
2991 "Should be called with a TRUNCATE node");
2992
2993 SelectionDAG &DAG = DCI.DAG;
2994 SDLoc DL(N);
2995 EVT VT = N->getValueType(0);
2996
2997 // We prefer to do this when all types are legal.
2998 if (!DCI.isAfterLegalizeDAG())
2999 return SDValue();
3000
3001 // Skip combine TRUNCATE atm if the operand of TRUNCATE might be a constant.
3002 if (N->getOperand(0)->getOpcode() == ISD::SELECT_CC &&
3003 isa<ConstantSDNode>(N->getOperand(0)->getOperand(0)) &&
3004 isa<ConstantSDNode>(N->getOperand(0)->getOperand(1)))
3005 return SDValue();
3006
3007 // Check all use of this TRUNCATE.
3008 for (const SDNode *User : N->uses()) {
3009 // Make sure that we're not going to replace TRUNCATE for non i32
3010 // instructions.
3011 //
3012 // FIXME: Although we could sometimes handle this, and it does occur in
3013 // practice that one of the condition inputs to the select is also one of
3014 // the outputs, we currently can't deal with this.
3015 if (isI32Insn(User, N))
3016 continue;
3017
3018 return SDValue();
3019 }
3020
3021 SDValue SubI32 = DAG.getTargetConstant(VE::sub_i32, DL, MVT::i32);
3022 return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, VT,
3023 N->getOperand(0), SubI32),
3024 0);
3025}
3026
// Target DAG-combine entry point: dispatches SELECT, SELECT_CC and
// TRUNCATE nodes to the dedicated combines above; all other opcodes are
// left untouched.
// NOTE(review): the signature opener (SDValue
// VETargetLowering::PerformDAGCombine(SDNode *N,) is absent from this
// extract.
3028 DAGCombinerInfo &DCI) const {
3029 switch (N->getOpcode()) {
3030 default:
3031 break;
3032 case ISD::SELECT:
3033 return combineSelect(N, DCI);
3034 case ISD::SELECT_CC:
3035 return combineSelectCC(N, DCI);
3036 case ISD::TRUNCATE:
3037 return combineTRUNCATE(N, DCI);
3038 }
3039
3040 return SDValue();
3041}
3042
3043//===----------------------------------------------------------------------===//
3044// VE Inline Assembly Support
3045//===----------------------------------------------------------------------===//
3046
// Inline-asm constraint classification: 'v' names VE's vector register
// class; everything else defers to the generic TargetLowering handling.
// NOTE(review): the signature opener (VETargetLowering::ConstraintType
// VETargetLowering::getConstraintType(StringRef Constraint) const {) is
// absent from this extract.
3049 if (Constraint.size() == 1) {
3050 switch (Constraint[0]) {
3051 default:
3052 break;
3053 case 'v': // vector registers
3054 return C_RegisterClass;
3055 }
3056 }
3057 return TargetLowering::getConstraintType(Constraint);
3058}
3059
// Map single-letter inline-asm register constraints to VE register
// classes: 'r' -> I64 scalar registers, 'v' -> V64 vector registers;
// anything else (including multi-character constraints) defers to the
// generic implementation.
// NOTE(review): the line carrying the function name and first parameter
// (original line 3061) is absent from this extract.
3060std::pair<unsigned, const TargetRegisterClass *>
3062 StringRef Constraint,
3063 MVT VT) const {
3064 const TargetRegisterClass *RC = nullptr;
3065 if (Constraint.size() == 1) {
3066 switch (Constraint[0]) {
3067 default:
3068 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3069 case 'r':
3070 RC = &VE::I64RegClass;
3071 break;
3072 case 'v':
3073 RC = &VE::V64RegClass;
3074 break;
3075 }
3076 return std::make_pair(0U, RC);
3077 }
3078
3079 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3080}
3081
3082//===----------------------------------------------------------------------===//
3083// VE Target Optimization Support
3084//===----------------------------------------------------------------------===//
3085
// Minimum number of cases before a jump table is used: raised to 8 for
// relative (PIC) jump tables to offset the cost of the PIC load sequence.
// NOTE(review): the signature opener and the non-PIC fallback return
// (original line 3091, presumably `return
// TargetLowering::getMinimumJumpTableEntries();`) are absent from this
// extract — confirm against upstream.
3087 // Specify 8 for PIC model to relieve the impact of PIC load instructions.
3088 if (isJumpTableRelative())
3089 return 8;
3090
3092}
3093
// TargetLowering::hasAndNot for VE: report an and-not instruction for
// scalar register operands only — vectors have no and-not, and immediates
// are rejected (see the long comment below for why).
// NOTE(review): the signature opener (bool VETargetLowering::hasAndNot(
// SDValue Y) const {) is absent from this extract.
3095 EVT VT = Y.getValueType();
3096
3097 // VE doesn't have vector and not instruction.
3098 if (VT.isVector())
3099 return false;
3100
3101 // VE allows different immediate values for X and Y where ~X & Y.
3102 // Only simm7 works for X, and only mimm works for Y on VE. However, this
3103 // function is used to check whether an immediate value is OK for and-not
3104 // instruction as both X and Y. Generating additional instruction to
3105 // retrieve an immediate value is no good since the purpose of this
3106 // function is to convert a series of 3 instructions to another series of
3107 // 3 instructions with better parallelism. Therefore, we return false
3108 // for all immediate values now.
3109 // FIXME: Change hasAndNot function to have two operands to make it work
3110 // correctly with Aurora VE.
3111 if (isa<ConstantSDNode>(Y))
3112 return false;
3113
3114 // It's ok for generic registers.
3115 return true;
3116}
3117
3119 SelectionDAG &DAG) const {
3120 assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!");
3121 MVT VT = Op.getOperand(0).getSimpleValueType();
3122
3123 // Special treatment for packed V64 types.
3124 assert(VT == MVT::v512i32 || VT == MVT::v512f32);
3125 (void)VT;
3126 // Example of codes:
3127 // %packed_v = extractelt %vr, %idx / 2
3128 // %v = %packed_v >> (%idx % 2 * 32)
3129 // %res = %v & 0xffffffff
3130
3131 SDValue Vec = Op.getOperand(0);
3132 SDValue Idx = Op.getOperand(1);
3133 SDLoc DL(Op);
3134 SDValue Result = Op;
3135 if (false /* Idx->isConstant() */) {
3136 // TODO: optimized implementation using constant values
3137 } else {
3138 SDValue Const1 = DAG.getConstant(1, DL, MVT::i64);
3139 SDValue HalfIdx = DAG.getNode(ISD::SRL, DL, MVT::i64, {Idx, Const1});
3140 SDValue PackedElt =
3141 SDValue(DAG.getMachineNode(VE::LVSvr, DL, MVT::i64, {Vec, HalfIdx}), 0);
3142 SDValue AndIdx = DAG.getNode(ISD::AND, DL, MVT::i64, {Idx, Const1});
3143 SDValue Shift = DAG.getNode(ISD::XOR, DL, MVT::i64, {AndIdx, Const1});
3144 SDValue Const5 = DAG.getConstant(5, DL, MVT::i64);
3145 Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, {Shift, Const5});
3146 PackedElt = DAG.getNode(ISD::SRL, DL, MVT::i64, {PackedElt, Shift});
3147 SDValue Mask = DAG.getConstant(0xFFFFFFFFL, DL, MVT::i64);
3148 PackedElt = DAG.getNode(ISD::AND, DL, MVT::i64, {PackedElt, Mask});
3149 SDValue SubI32 = DAG.getTargetConstant(VE::sub_i32, DL, MVT::i32);
3150 Result = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
3151 MVT::i32, PackedElt, SubI32),
3152 0);
3153
3154 if (Op.getSimpleValueType() == MVT::f32) {
3155 Result = DAG.getBitcast(MVT::f32, Result);
3156 } else {
3157 assert(Op.getSimpleValueType() == MVT::i32);
3158 }
3159 }
3160 return Result;
3161}
3162
3164 SelectionDAG &DAG) const {
3165 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!");
3166 MVT VT = Op.getOperand(0).getSimpleValueType();
3167
3168 // Special treatment for packed V64 types.
3169 assert(VT == MVT::v512i32 || VT == MVT::v512f32);
3170 (void)VT;
3171 // The v512i32 and v512f32 starts from upper bits (0..31). This "upper
3172 // bits" required `val << 32` from C implementation's point of view.
3173 //
3174 // Example of codes:
3175 // %packed_elt = extractelt %vr, (%idx >> 1)
3176 // %shift = ((%idx & 1) ^ 1) << 5
3177 // %packed_elt &= 0xffffffff00000000 >> shift
3178 // %packed_elt |= (zext %val) << shift
3179 // %vr = insertelt %vr, %packed_elt, (%idx >> 1)
3180
3181 SDLoc DL(Op);
3182 SDValue Vec = Op.getOperand(0);
3183 SDValue Val = Op.getOperand(1);
3184 SDValue Idx = Op.getOperand(2);
3185 if (Idx.getSimpleValueType() == MVT::i32)
3186 Idx = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Idx);
3187 if (Val.getSimpleValueType() == MVT::f32)
3188 Val = DAG.getBitcast(MVT::i32, Val);
3189 assert(Val.getSimpleValueType() == MVT::i32);
3190 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
3191
3192 SDValue Result = Op;
3193 if (false /* Idx->isConstant()*/) {
3194 // TODO: optimized implementation using constant values
3195 } else {
3196 SDValue Const1 = DAG.getConstant(1, DL, MVT::i64);
3197 SDValue HalfIdx = DAG.getNode(ISD::SRL, DL, MVT::i64, {Idx, Const1});
3198 SDValue PackedElt =
3199 SDValue(DAG.getMachineNode(VE::LVSvr, DL, MVT::i64, {Vec, HalfIdx}), 0);
3200 SDValue AndIdx = DAG.getNode(ISD::AND, DL, MVT::i64, {Idx, Const1});
3201 SDValue Shift = DAG.getNode(ISD::XOR, DL, MVT::i64, {AndIdx, Const1});
3202 SDValue Const5 = DAG.getConstant(5, DL, MVT::i64);
3203 Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, {Shift, Const5});
3204 SDValue Mask = DAG.getConstant(0xFFFFFFFF00000000L, DL, MVT::i64);
3205 Mask = DAG.getNode(ISD::SRL, DL, MVT::i64, {Mask, Shift});
3206 PackedElt = DAG.getNode(ISD::AND, DL, MVT::i64, {PackedElt, Mask});
3207 Val = DAG.getNode(ISD::SHL, DL, MVT::i64, {Val, Shift});
3208 PackedElt = DAG.getNode(ISD::OR, DL, MVT::i64, {PackedElt, Val});
3209 Result =
3210 SDValue(DAG.getMachineNode(VE::LSVrr_v, DL, Vec.getSimpleValueType(),
3211 {HalfIdx, PackedElt, Vec}),
3212 0);
3213 }
3214 return Result;
3215}
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define RegName(no)
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
const char LLVMTargetMachineRef TM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static unsigned decideComp(EVT SrcVT, ISD::CondCode CC)
static bool isSimm7(SDValue V)
CCAssignFn * getParamCC(CallingConv::ID CallConv, bool IsVarArg)
static SDValue lowerLoadF128(SDValue Op, SelectionDAG &DAG)
static bool isMImm(SDValue V)
static SDValue prepareTS1AM(SDValue Op, SelectionDAG &DAG, SDValue &Flag, SDValue &Bits)
CCAssignFn * getReturnCC(CallingConv::ID CallConv)
static bool safeWithoutCompWithNull(EVT SrcVT, ISD::CondCode CC, bool WithCMov)
static bool isI32InsnAllUses(const SDNode *User, const SDNode *N)
static SDValue lowerLoadI1(SDValue Op, SelectionDAG &DAG)
static SDValue generateComparison(EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode CC, bool WithCMov, const SDLoc &DL, SelectionDAG &DAG)
static EVT decideCompType(EVT SrcVT)
static bool isI32Insn(const SDNode *User, const SDNode *N)
static SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const VETargetLowering &TLI, const VESubtarget *Subtarget)
static const MVT AllMaskVTs[]
static bool getUniqueInsertion(SDNode *N, unsigned &UniqueIdx)
static SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const VETargetLowering &TLI, const VESubtarget *Subtarget)
static const MVT AllVectorVTs[]
static const MVT AllPackedVTs[]
static SDValue finalizeTS1AM(SDValue Op, SelectionDAG &DAG, SDValue Data, SDValue Bits)
static SDValue lowerStoreF128(SDValue Op, SelectionDAG &DAG)
static SDValue lowerStoreI1(SDValue Op, SelectionDAG &DAG)
#define TARGET_NODE_CASE(NAME)
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:696
BinOp getOperation() const
Definition: Instructions.h:787
This is an SDNode representing atomic operations.
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
CCState - This class holds information needed while lowering arguments and return values.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
bool needsCustom() const
bool isMemLoc() const
bool isExtInLoc() const
int64_t getLocMemOffset() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
unsigned size() const
Definition: DenseMap.h:99
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
FenceInst * CreateFence(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, const Twine &Name="")
Definition: IRBuilder.h:1839
bool hasAtomicStore() const LLVM_READONLY
Return true if this atomic instruction stores to memory.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition: MCExpr.h:617
Context object for machine code objects.
Definition: MCContext.h:83
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:213
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:393
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Machine Value Type.
SimpleValueType SimpleTy
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
static auto integer_valuetypes()
static auto vector_valuetypes()
MVT getVectorElementType() const
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
bool isEHPad() const
Returns true if the block is a landing pad.
MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
succ_reverse_iterator succ_rbegin()
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
succ_reverse_iterator succ_rend()
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setFrameAddressIsTaken(bool T)
void setReturnAddressIsTaken(bool s)
int getFunctionContextIndex() const
Return the index for the function context object.
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist,...
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
bool hasCallSiteLandingPad(MCSymbol *Sym)
Return true if the landing pad Eh symbol has an associated call site.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
SmallVectorImpl< unsigned > & getCallSiteLandingPad(MCSymbol *Sym)
Get the call site indexes for a landing pad EH symbol.
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned createJumpTableIndex(const std::vector< MachineBasicBlock * > &DestBBs)
createJumpTableIndex - Create a new jump table.
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const MCPhysReg * getCalleeSavedRegs() const
Returns list of callee saved registers.
Align getAlign() const
bool isVolatile() const
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:227
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:734
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:744
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:486
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:487
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:785
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:688
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:780
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:481
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:811
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
Definition: SelectionDAG.h:499
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:751
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:568
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:344
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:479
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
Information about stack frame layout on the target.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
virtual unsigned getMinimumJumpTableEntries() const
Return lower limit for number of blocks in a jump table.
const TargetMachine & getTargetMachine() const
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
void setSupportsUnalignedAtomics(bool UnalignedSupported)
Sets whether unaligned atomic operations are supported.
virtual bool isJumpTableRelative() const
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getVoidTy(LLVMContext &C)
Value * getOperand(unsigned i) const
Definition: User.h:169
SDValue getBroadcast(EVT ResultVT, SDValue Scalar, SDValue AVL) const
SDValue getNode(unsigned OC, SDVTList VTL, ArrayRef< SDValue > OpV, std::optional< SDNodeFlags > Flags=std::nullopt) const
getNode {
Definition: VECustomDAG.h:156
SDValue getUNDEF(EVT VT) const
Definition: VECustomDAG.h:180
SDValue getConstant(uint64_t Val, EVT VT, bool IsTarget=false, bool IsOpaque=false) const
bool hasBP(const MachineFunction &MF) const
Register getGlobalBaseReg(MachineFunction *MF) const
} Optimization
@ VK_VE_GOTOFF_HI32
Definition: VEMCExpr.h:34
@ VK_VE_GOTOFF_LO32
Definition: VEMCExpr.h:35
bool enableVPU() const
Definition: VESubtarget.h:65
unsigned getRsaSize() const
Get the size of RSA, return address, and frame pointer as described in VEFrameLowering....
Definition: VESubtarget.h:79
const VEInstrInfo * getInstrInfo() const override
Definition: VESubtarget.h:51
const VEFrameLowering * getFrameLowering() const override
Definition: VESubtarget.h:52
const VERegisterInfo * getRegisterInfo() const override
Definition: VESubtarget.h:55
SDValue splitMaskArithmetic(SDValue Op, SelectionDAG &DAG) const
SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
SDValue lowerToVVP(SDValue Op, SelectionDAG &DAG) const
} Custom Inserter
SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
SDValue lowerVAARG(SDValue Op, SelectionDAG &DAG) const
SDValue combineSelect(SDNode *N, DAGCombinerInfo &DCI) const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
VETargetLowering(const TargetMachine &TM, const VESubtarget &STI)
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Custom Lower {.
SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &ArgsFlags, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
} VVPLowering
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
SDValue combineSelectCC(SDNode *N, DAGCombinerInfo &DCI) const
SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const
unsigned getMinimumJumpTableEntries() const override
} Inline Assembly
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
MachineBasicBlock * emitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *BB) const
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Register prepareMBB(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineBasicBlock *TargetBB, const DebugLoc &DL) const
void setupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB, MachineBasicBlock *DispatchBB, int FI, int Offset) const
SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
Custom Inserter {.
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align A, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Returns true if the target allows unaligned memory accesses of the specified type.
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const
SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const
TargetLoweringBase::LegalizeAction getCustomOperationAction(SDNode &) const override
Custom Lower {.
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const
SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const
SDValue legalizeInternalVectorOp(SDValue Op, SelectionDAG &DAG) const
Register prepareSymbol(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, StringRef Symbol, const DebugLoc &DL, bool IsLocal, bool IsCall) const
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
} Custom Lower
SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const
} Custom DAGCombine
SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const
const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned Uid, MCContext &Ctx) const override
SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const
unsigned getJumpTableEncoding() const override
JumpTable for VE.
SDValue lowerATOMIC_SWAP(SDValue Op, SelectionDAG &DAG) const
SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF, SelectionDAG &DAG) const
ConstraintType getConstraintType(StringRef Constraint) const override
Inline Assembly {.
SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
SDValue lowerToTLSGeneralDynamicModel(SDValue Op, SelectionDAG &DAG) const
LLVM Value Representation.
Definition: Value.h:74
iterator_range< use_iterator > uses()
Definition: Value.h:376
self_iterator getIterator()
Definition: ilist_node.h:132
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
Definition: CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition: ISDOpcodes.h:40
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:779
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1169
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1165
@ CTLZ_ZERO_UNDEF
Definition: ISDOpcodes.h:752
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
Definition: ISDOpcodes.h:1330
@ VECREDUCE_SMIN
Definition: ISDOpcodes.h:1415
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
Definition: ISDOpcodes.h:153
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:257
@ ATOMIC_LOAD_NAND
Definition: ISDOpcodes.h:1312
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:743
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1198
@ ConstantFP
Definition: ISDOpcodes.h:77
@ ATOMIC_LOAD_MAX
Definition: ISDOpcodes.h:1314
@ ATOMIC_LOAD_UMIN
Definition: ISDOpcodes.h:1315
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1074
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:813
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:497
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
Definition: ISDOpcodes.h:157
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
Definition: ISDOpcodes.h:1297
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:820
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
Definition: ISDOpcodes.h:1271
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1276
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:262
@ VECREDUCE_SMAX
Definition: ISDOpcodes.h:1414
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:943
@ ATOMIC_LOAD_OR
Definition: ISDOpcodes.h:1310
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:933
@ ATOMIC_LOAD_XOR
Definition: ISDOpcodes.h:1311
@ GlobalTLSAddress
Definition: ISDOpcodes.h:79
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:804
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:960
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1120
@ ATOMIC_LOAD_MIN
Definition: ISDOpcodes.h:1313
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1099
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:756
@ VECREDUCE_UMAX
Definition: ISDOpcodes.h:1416
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1194
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition: ISDOpcodes.h:215
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
Definition: ISDOpcodes.h:1409
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:673
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:734
@ ATOMIC_LOAD_CLR
Definition: ISDOpcodes.h:1309
@ ATOMIC_LOAD_AND
Definition: ISDOpcodes.h:1308
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:549
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition: ISDOpcodes.h:209
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:810
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:771
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
Definition: ISDOpcodes.h:1291
@ ATOMIC_LOAD_UMAX
Definition: ISDOpcodes.h:1316
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:1008
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1084
@ ConstantPool
Definition: ISDOpcodes.h:82
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition: ISDOpcodes.h:696
@ VECREDUCE_UMIN
Definition: ISDOpcodes.h:1417
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:100
@ ATOMIC_LOAD_ADD
Definition: ISDOpcodes.h:1306
@ ATOMIC_LOAD_SUB
Definition: ISDOpcodes.h:1307
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:708
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:190
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:538
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
Definition: ISDOpcodes.h:1305
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
Definition: ISDOpcodes.h:147
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:816
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1189
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1113
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:793
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:61
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:507
@ AssertZext
Definition: ISDOpcodes.h:62
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition: ISDOpcodes.h:529
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1578
bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
@ Dead
Unused definition.
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:57
CondCode
Definition: VE.h:42
@ CC_ILE
Definition: VE.h:49