LLVM  13.0.0git
RISCVISelDAGToDAG.cpp
Go to the documentation of this file.
1 //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the RISCV target.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVISelDAGToDAG.h"
16 #include "RISCVISelLowering.h"
18 #include "llvm/IR/IntrinsicsRISCV.h"
19 #include "llvm/Support/Alignment.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Support/KnownBits.h"
24 
25 using namespace llvm;
26 
27 #define DEBUG_TYPE "riscv-isel"
28 
29 namespace llvm {
30 namespace RISCV {
31 #define GET_RISCVVSSEGTable_IMPL
32 #define GET_RISCVVLSEGTable_IMPL
33 #define GET_RISCVVLXSEGTable_IMPL
34 #define GET_RISCVVSXSEGTable_IMPL
35 #define GET_RISCVVLETable_IMPL
36 #define GET_RISCVVSETable_IMPL
37 #define GET_RISCVVLXTable_IMPL
38 #define GET_RISCVVSXTable_IMPL
39 #include "RISCVGenSearchableTables.inc"
40 } // namespace RISCV
41 } // namespace llvm
42 
44  doPeepholeLoadStoreADDI();
45 }
46 
// Materialize the immediate Imm into a register by emitting the machine-node
// chain for an RISCVMatInt instruction sequence (LUI / ADDI(W) / SLLI steps).
// Returns the final node of the chain, whose value 0 holds the constant.
// NOTE(review): the declaration/initialization of `Seq` lived on original
// line 49, which is missing from this extract (presumably
// RISCVMatInt::generateInstSeq(Imm, ...)) — restore it before building.
47 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
48  MVT XLenVT) {
50 
51  SDNode *Result = nullptr;
// X0 (hard-wired zero) seeds the sequence; each later step consumes the
// previous step's result.
52  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
53  for (RISCVMatInt::Inst &Inst : Seq) {
54  SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
// LUI takes only an immediate; every other opcode in the sequence is a
// reg+imm instruction that chains off the previous result.
55  if (Inst.Opc == RISCV::LUI)
56  Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
57  else
58  Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
59 
60  // Only the first instruction has X0 as its source.
61  SrcReg = SDValue(Result, 0);
62  }
63 
64  return Result;
65 }
66 
// Build a REG_SEQUENCE tying 2..8 vector registers into one tuple value of
// the given register class, assigning consecutive subregister indices
// starting at SubReg0. Returns the MVT::Untyped tuple value.
// NOTE(review): the opening signature line (original 67) and the declaration
// of `Ops` (original 72, presumably SmallVector<SDValue> Ops;) are missing
// from this extract — restore them from upstream.
68  unsigned RegClassID, unsigned SubReg0) {
69  assert(Regs.size() >= 2 && Regs.size() <= 8);
70 
71  SDLoc DL(Regs[0]);
73 
// REG_SEQUENCE operand layout: register-class id first, then alternating
// (register, subregister-index) pairs.
74  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
75 
76  for (unsigned I = 0; I < Regs.size(); ++I) {
77  Ops.push_back(Regs[I]);
78  Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
79  }
80  SDNode *N =
81  CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
82  return SDValue(N, 0);
83 }
84 
// Tuple of NF (2..8) LMUL=1 vector registers. RegClassIDs is indexed by
// NF - 2, matching the VRN2M1..VRN8M1 class progression.
// NOTE(review): the function's opening signature line (original 85) is
// missing from this extract.
86  unsigned NF) {
87  static const unsigned RegClassIDs[] = {
88  RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
89  RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
90  RISCV::VRN8M1RegClassID};
91 
92  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
93 }
94 
// Tuple of NF (2..4) LMUL=2 vector registers; only VRN2M2..VRN4M2 exist
// because 4 x M2 already fills the 8-register budget.
// NOTE(review): the function's opening signature line (original 95) is
// missing from this extract.
96  unsigned NF) {
97  static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
98  RISCV::VRN3M2RegClassID,
99  RISCV::VRN4M2RegClassID};
100 
101  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
102 }
103 
// Tuple of LMUL=4 registers: only NF=2 is representable (2 x M4 = 8 regs),
// so the class is hard-coded to VRN2M4.
// NOTE(review): the function's opening signature line (original 104) is
// missing from this extract.
105  unsigned NF) {
106  return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
107  RISCV::sub_vrm4_0);
108 }
109 
// Dispatch tuple creation by LMUL. Fractional LMULs fall through to the M1
// helper since each segment still occupies one whole register.
// NOTE(review): the opening signature line (original 110) and the `case`
// labels (originals 115-118, 120, 122 — the LMUL_F8/F4/F2/1, LMUL_2, LMUL_4
// cases) are missing from this extract; only the case bodies survived.
// Restore the labels from upstream before using this text as source.
111  unsigned NF, RISCVII::VLMUL LMUL) {
112  switch (LMUL) {
113  default:
114  llvm_unreachable("Invalid LMUL.");
119  return createM1Tuple(CurDAG, Regs, NF);
121  return createM2Tuple(CurDAG, Regs, NF);
123  return createM4Tuple(CurDAG, Regs, NF);
124  }
125 }
126 
// Append the common trailing operands of a vector load/store pseudo to
// Operands, consuming Node's operands starting at CurOp in this fixed order:
//   base address, [stride or index], [mask], VL, SEW(log2), chain, [glue].
// If IndexVT is non-null it receives the type of the stride/index operand.
// NOTE(review): the opening signature line (original 127,
// RISCVDAGToDAGISel::addVectorLoadStoreOperands) is missing from this
// extract.
128  SDNode *Node, unsigned SEW, const SDLoc &DL, unsigned CurOp, bool IsMasked,
129  bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands, MVT *IndexVT) {
130  SDValue Chain = Node->getOperand(0);
131  SDValue Glue;
132 
133  SDValue Base;
134  SelectBaseAddr(Node->getOperand(CurOp++), Base);
135  Operands.push_back(Base); // Base pointer.
136 
137  if (IsStridedOrIndexed) {
138  Operands.push_back(Node->getOperand(CurOp++)); // Index.
139  if (IndexVT)
140  *IndexVT = Operands.back()->getSimpleValueType(0);
141  }
142 
143  if (IsMasked) {
144  // Mask needs to be copied to V0.
// The CopyToReg pins the mask in V0; its glue result is threaded onto the
// pseudo so the copy cannot be scheduled away from the masked operation.
145  SDValue Mask = Node->getOperand(CurOp++);
146  Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
147  Glue = Chain.getValue(1);
148  Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
149  }
150  SDValue VL;
151  selectVLOp(Node->getOperand(CurOp++), VL);
152  Operands.push_back(VL);
153 
// SEW is encoded as log2 of the element width, matching the pseudo's
// immediate operand format.
154  MVT XLenVT = Subtarget->getXLenVT();
155  SDValue SEWOp = CurDAG->getTargetConstant(Log2_32(SEW), DL, XLenVT);
156  Operands.push_back(SEWOp);
157 
158  Operands.push_back(Chain); // Chain.
159  if (Glue)
160  Operands.push_back(Glue);
161 }
162 
// Select a (possibly masked, possibly strided) segment load intrinsic into
// its VLSEG pseudo. The pseudo yields one tuple register; each of the NF
// results of Node is replaced by an extract of the matching subregister,
// and the final result (index NF) by the load's chain.
// NOTE(review): originals 169 (LMUL computation), 172 (`Operands`
// declaration) and 187-188 (creation of `Load` from P->Pseudo) are missing
// from this extract — restore from upstream.
163 void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
164  bool IsStrided) {
165  SDLoc DL(Node);
// Results are NF vector values plus one chain.
166  unsigned NF = Node->getNumValues() - 1;
167  MVT VT = Node->getSimpleValueType(0);
168  unsigned ScalarSize = VT.getScalarSizeInBits();
170 
// Operand 0 is the chain and 1 the intrinsic id, so real operands start at 2.
171  unsigned CurOp = 2;
173  if (IsMasked) {
// Masked форм carry NF merge ("maskedoff") operands that must be packed
// into a single tuple operand for the pseudo.
174  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
175  Node->op_begin() + CurOp + NF);
176  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
177  Operands.push_back(MaskedOff);
178  CurOp += NF;
179  }
180 
181  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked, IsStrided,
182  Operands);
183 
184  const RISCV::VLSEGPseudo *P =
185  RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, ScalarSize,
186  static_cast<unsigned>(LMUL));
189 
// Propagate the memory operand so alias analysis/scheduling still see the
// access after selection.
190  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
191  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
192 
193  SDValue SuperReg = SDValue(Load, 0);
194  for (unsigned I = 0; I < NF; ++I) {
195  unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
196  ReplaceUses(SDValue(Node, I),
197  CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
198  }
199 
200  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
201  CurDAG->RemoveDeadNode(Node);
202 }
203 
// Select a fault-only-first segment load intrinsic into its VLSEG<N>FF
// pseudo. In addition to the NF tuple results and the chain, FF loads
// produce the post-fault VL, which is read back via PseudoReadVL glued to
// the load.
// NOTE(review): originals 210 (LMUL computation), 213 (`Operands`
// declaration) and 228-229 (creation of `Load`) are missing from this
// extract — restore from upstream.
204 void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
205  SDLoc DL(Node);
206  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
207  MVT VT = Node->getSimpleValueType(0);
208  MVT XLenVT = Subtarget->getXLenVT();
209  unsigned ScalarSize = VT.getScalarSizeInBits();
211 
212  unsigned CurOp = 2;
214  if (IsMasked) {
// Pack the NF merge operands into one tuple for the masked pseudo.
215  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
216  Node->op_begin() + CurOp + NF);
217  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
218  Operands.push_back(MaskedOff);
219  CurOp += NF;
220  }
221 
222  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
223  /*IsStridedOrIndexed*/ false, Operands);
224 
225  const RISCV::VLSEGPseudo *P =
226  RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
227  ScalarSize, static_cast<unsigned>(LMUL));
// ReadVL is glued to the load so no vsetvli can slip in between and clobber
// the VL produced by the fault-only-first load.
230  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
231  /*Glue*/ SDValue(Load, 2));
232 
233  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
234  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
235 
236  SDValue SuperReg = SDValue(Load, 0);
237  for (unsigned I = 0; I < NF; ++I) {
238  unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
239  ReplaceUses(SDValue(Node, I),
240  CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
241  }
242 
243  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0)); // VL
244  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
245  CurDAG->RemoveDeadNode(Node);
246 }
247 
// Select an indexed (ordered or unordered) segment load intrinsic into its
// VL[O|U]XSEG pseudo. The pseudo is additionally parameterized by the index
// vector's SEW and LMUL, recovered from the index operand's type.
// NOTE(review): originals 254 (LMUL computation), 257 (`Operands`
// declaration), 270 (first line of the element-count assert) and 278-279
// (creation of `Load`) are missing from this extract — restore from
// upstream.
248 void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
249  bool IsOrdered) {
250  SDLoc DL(Node);
251  unsigned NF = Node->getNumValues() - 1;
252  MVT VT = Node->getSimpleValueType(0);
253  unsigned ScalarSize = VT.getScalarSizeInBits();
255 
256  unsigned CurOp = 2;
258  if (IsMasked) {
259  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
260  Node->op_begin() + CurOp + NF);
261  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
262  Operands.push_back(MaskedOff);
263  CurOp += NF;
264  }
265 
// IndexVT is filled in with the type of the index operand so its SEW/LMUL
// can be looked up below.
266  MVT IndexVT;
267  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
268  /*IsStridedOrIndexed*/ true, Operands, &IndexVT);
269 
271  "Element count mismatch");
272 
273  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
274  unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
275  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
276  NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
277  static_cast<unsigned>(IndexLMUL));
280 
281  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
282  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
283 
// Split the tuple result back into NF individual vector values.
284  SDValue SuperReg = SDValue(Load, 0);
285  for (unsigned I = 0; I < NF; ++I) {
286  unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
287  ReplaceUses(SDValue(Node, I),
288  CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
289  }
290 
291  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
292  CurDAG->RemoveDeadNode(Node);
293 }
294 
// Select a (possibly masked, possibly strided) segment store intrinsic into
// its VSSEG pseudo. The NF store-value operands are packed into one tuple
// operand before the common load/store operands are appended.
// NOTE(review): originals 305 (LMUL computation), 309 (`Operands`
// declaration) and 318 (creation of `Store`) are missing from this
// extract — restore from upstream.
295 void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
296  bool IsStrided) {
297  SDLoc DL(Node);
// Operand count minus chain, intrinsic id, base pointer and VL; strided and
// masked forms each carry one extra operand.
298  unsigned NF = Node->getNumOperands() - 4;
299  if (IsStrided)
300  NF--;
301  if (IsMasked)
302  NF--;
303  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
304  unsigned ScalarSize = VT.getScalarSizeInBits();
306  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
307  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
308 
310  Operands.push_back(StoreVal);
311  unsigned CurOp = 2 + NF;
312 
313  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked, IsStrided,
314  Operands);
315 
316  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
317  NF, IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
319  CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
320 
321  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
322  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
323 
324  ReplaceNode(Node, Store);
325 }
326 
// Select an indexed (ordered or unordered) segment store intrinsic into its
// VS[O|U]XSEG pseudo, parameterized by both the data LMUL and the index
// vector's SEW/LMUL.
// NOTE(review): originals 335 (LMUL computation), 339 (`Operands`
// declaration), 347 (first line of the element-count assert) and 355
// (creation of `Store`) are missing from this extract — restore from
// upstream.
327 void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
328  bool IsOrdered) {
329  SDLoc DL(Node);
// Operand count minus chain, intrinsic id, base, index and VL; the masked
// form carries one extra mask operand.
330  unsigned NF = Node->getNumOperands() - 5;
331  if (IsMasked)
332  --NF;
333  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
334  unsigned ScalarSize = VT.getScalarSizeInBits();
336  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
337  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
338 
340  Operands.push_back(StoreVal);
341  unsigned CurOp = 2 + NF;
342 
343  MVT IndexVT;
344  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
345  /*IsStridedOrIndexed*/ true, Operands, &IndexVT);
346 
348  "Element count mismatch");
349 
350  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
351  unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
352  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
353  NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
354  static_cast<unsigned>(IndexLMUL));
356  CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
357 
358  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
359  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
360 
361  ReplaceNode(Node, Store);
362 }
363 
364 
366  // If we have a custom node, we have already selected.
367  if (Node->isMachineOpcode()) {
368  LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
369  Node->setNodeId(-1);
370  return;
371  }
372 
373  // Instruction Selection not handled by the auto-generated tablegen selection
374  // should be handled here.
375  unsigned Opcode = Node->getOpcode();
376  MVT XLenVT = Subtarget->getXLenVT();
377  SDLoc DL(Node);
378  MVT VT = Node->getSimpleValueType(0);
379 
380  switch (Opcode) {
381  case ISD::Constant: {
382  auto *ConstNode = cast<ConstantSDNode>(Node);
383  if (VT == XLenVT && ConstNode->isNullValue()) {
384  SDValue New =
385  CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
386  ReplaceNode(Node, New.getNode());
387  return;
388  }
389  ReplaceNode(Node, selectImm(CurDAG, DL, ConstNode->getSExtValue(), XLenVT));
390  return;
391  }
392  case ISD::FrameIndex: {
393  SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
394  int FI = cast<FrameIndexSDNode>(Node)->getIndex();
395  SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
396  ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
397  return;
398  }
399  case ISD::SRL: {
400  // We don't need this transform if zext.h is supported.
401  if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
402  break;
403  // Optimize (srl (and X, 0xffff), C) ->
404  // (srli (slli X, (XLen-16), (XLen-16) + C)
405  // Taking into account that the 0xffff may have had lower bits unset by
406  // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
407  // This pattern occurs when type legalizing i16 right shifts.
408  // FIXME: This could be extended to other AND masks.
409  auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
410  if (N1C) {
411  uint64_t ShAmt = N1C->getZExtValue();
412  SDValue N0 = Node->getOperand(0);
413  if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
414  isa<ConstantSDNode>(N0.getOperand(1))) {
415  uint64_t Mask = N0.getConstantOperandVal(1);
416  Mask |= maskTrailingOnes<uint64_t>(ShAmt);
417  if (Mask == 0xffff) {
418  unsigned LShAmt = Subtarget->getXLen() - 16;
419  SDNode *SLLI =
420  CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
421  CurDAG->getTargetConstant(LShAmt, DL, VT));
422  SDNode *SRLI = CurDAG->getMachineNode(
423  RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
424  CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
425  ReplaceNode(Node, SRLI);
426  return;
427  }
428  }
429  }
430 
431  break;
432  }
434  unsigned IntNo = Node->getConstantOperandVal(0);
435  switch (IntNo) {
436  // By default we do not custom select any intrinsic.
437  default:
438  break;
439  case Intrinsic::riscv_vmsgeu:
440  case Intrinsic::riscv_vmsge: {
441  SDValue Src1 = Node->getOperand(1);
442  SDValue Src2 = Node->getOperand(2);
443  // Only custom select scalar second operand.
444  if (Src2.getValueType() != XLenVT)
445  break;
446  // Small constants are handled with patterns.
447  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
448  int64_t CVal = C->getSExtValue();
449  if (CVal >= -15 && CVal <= 16)
450  break;
451  }
452  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
453  MVT Src1VT = Src1.getSimpleValueType();
454  unsigned VMSLTOpcode, VMNANDOpcode;
455  switch (RISCVTargetLowering::getLMUL(Src1VT)) {
456  default:
457  llvm_unreachable("Unexpected LMUL!");
459  VMSLTOpcode =
460  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
461  VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
462  break;
464  VMSLTOpcode =
465  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
466  VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
467  break;
469  VMSLTOpcode =
470  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
471  VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
472  break;
474  VMSLTOpcode =
475  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
476  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
477  break;
479  VMSLTOpcode =
480  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
481  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
482  break;
484  VMSLTOpcode =
485  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
486  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
487  break;
489  VMSLTOpcode =
490  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
491  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
492  break;
493  }
495  Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
496  SDValue VL;
497  selectVLOp(Node->getOperand(3), VL);
498 
499  // Expand to
500  // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
501  SDValue Cmp = SDValue(
502  CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
503  0);
504  ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
505  {Cmp, Cmp, VL, SEW}));
506  return;
507  }
508  case Intrinsic::riscv_vmsgeu_mask:
509  case Intrinsic::riscv_vmsge_mask: {
510  SDValue Src1 = Node->getOperand(2);
511  SDValue Src2 = Node->getOperand(3);
512  // Only custom select scalar second operand.
513  if (Src2.getValueType() != XLenVT)
514  break;
515  // Small constants are handled with patterns.
516  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
517  int64_t CVal = C->getSExtValue();
518  if (CVal >= -15 && CVal <= 16)
519  break;
520  }
521  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
522  MVT Src1VT = Src1.getSimpleValueType();
523  unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
524  switch (RISCVTargetLowering::getLMUL(Src1VT)) {
525  default:
526  llvm_unreachable("Unexpected LMUL!");
528  VMSLTOpcode =
529  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
530  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
531  : RISCV::PseudoVMSLT_VX_MF8_MASK;
532  VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
533  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
534  break;
536  VMSLTOpcode =
537  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
538  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
539  : RISCV::PseudoVMSLT_VX_MF4_MASK;
540  VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
541  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
542  break;
544  VMSLTOpcode =
545  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
546  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
547  : RISCV::PseudoVMSLT_VX_MF2_MASK;
548  VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
549  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
550  break;
552  VMSLTOpcode =
553  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
554  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
555  : RISCV::PseudoVMSLT_VX_M1_MASK;
556  VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
557  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
558  break;
560  VMSLTOpcode =
561  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
562  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
563  : RISCV::PseudoVMSLT_VX_M2_MASK;
564  VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
565  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
566  break;
568  VMSLTOpcode =
569  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
570  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
571  : RISCV::PseudoVMSLT_VX_M4_MASK;
572  VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
573  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
574  break;
576  VMSLTOpcode =
577  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
578  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
579  : RISCV::PseudoVMSLT_VX_M8_MASK;
580  VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
581  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
582  break;
583  }
585  Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
586  SDValue VL;
587  selectVLOp(Node->getOperand(5), VL);
588  SDValue MaskedOff = Node->getOperand(1);
589  SDValue Mask = Node->getOperand(4);
590  // If the MaskedOff value and the Mask are the same value use
591  // vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt
592  // This avoids needing to copy v0 to vd before starting the next sequence.
593  if (Mask == MaskedOff) {
594  SDValue Cmp = SDValue(
595  CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
596  0);
597  ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
598  {Mask, Cmp, VL, SEW}));
599  return;
600  }
601 
602  // Otherwise use
603  // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
604  SDValue Cmp = SDValue(
605  CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
606  {MaskedOff, Src1, Src2, Mask, VL, SEW}),
607  0);
608  ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
609  {Cmp, Mask, VL, SEW}));
610  return;
611  }
612  }
613  break;
614  }
615  case ISD::INTRINSIC_W_CHAIN: {
616  unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
617  switch (IntNo) {
618  // By default we do not custom select any intrinsic.
619  default:
620  break;
621 
622  case Intrinsic::riscv_vsetvli:
623  case Intrinsic::riscv_vsetvlimax: {
624  if (!Subtarget->hasStdExtV())
625  break;
626 
627  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
628  unsigned Offset = VLMax ? 2 : 3;
629 
630  assert(Node->getNumOperands() == Offset + 2 &&
631  "Unexpected number of operands");
632 
633  unsigned SEW =
634  RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
635  RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
636  Node->getConstantOperandVal(Offset + 1) & 0x7);
637 
638  unsigned VTypeI = RISCVVType::encodeVTYPE(
639  VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
640  SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
641 
642  SDValue VLOperand;
643  if (VLMax) {
644  VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
645  } else {
646  VLOperand = Node->getOperand(2);
647 
648  if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
649  uint64_t AVL = C->getZExtValue();
650  if (isUInt<5>(AVL)) {
651  SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
652  ReplaceNode(
653  Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
654  MVT::Other, VLImm, VTypeIOp,
655  /* Chain */ Node->getOperand(0)));
656  return;
657  }
658  }
659  }
660 
661  ReplaceNode(Node,
662  CurDAG->getMachineNode(RISCV::PseudoVSETVLI, DL, XLenVT,
663  MVT::Other, VLOperand, VTypeIOp,
664  /* Chain */ Node->getOperand(0)));
665  return;
666  }
667  case Intrinsic::riscv_vlseg2:
668  case Intrinsic::riscv_vlseg3:
669  case Intrinsic::riscv_vlseg4:
670  case Intrinsic::riscv_vlseg5:
671  case Intrinsic::riscv_vlseg6:
672  case Intrinsic::riscv_vlseg7:
673  case Intrinsic::riscv_vlseg8: {
674  selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
675  return;
676  }
677  case Intrinsic::riscv_vlseg2_mask:
678  case Intrinsic::riscv_vlseg3_mask:
679  case Intrinsic::riscv_vlseg4_mask:
680  case Intrinsic::riscv_vlseg5_mask:
681  case Intrinsic::riscv_vlseg6_mask:
682  case Intrinsic::riscv_vlseg7_mask:
683  case Intrinsic::riscv_vlseg8_mask: {
684  selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
685  return;
686  }
687  case Intrinsic::riscv_vlsseg2:
688  case Intrinsic::riscv_vlsseg3:
689  case Intrinsic::riscv_vlsseg4:
690  case Intrinsic::riscv_vlsseg5:
691  case Intrinsic::riscv_vlsseg6:
692  case Intrinsic::riscv_vlsseg7:
693  case Intrinsic::riscv_vlsseg8: {
694  selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
695  return;
696  }
697  case Intrinsic::riscv_vlsseg2_mask:
698  case Intrinsic::riscv_vlsseg3_mask:
699  case Intrinsic::riscv_vlsseg4_mask:
700  case Intrinsic::riscv_vlsseg5_mask:
701  case Intrinsic::riscv_vlsseg6_mask:
702  case Intrinsic::riscv_vlsseg7_mask:
703  case Intrinsic::riscv_vlsseg8_mask: {
704  selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
705  return;
706  }
707  case Intrinsic::riscv_vloxseg2:
708  case Intrinsic::riscv_vloxseg3:
709  case Intrinsic::riscv_vloxseg4:
710  case Intrinsic::riscv_vloxseg5:
711  case Intrinsic::riscv_vloxseg6:
712  case Intrinsic::riscv_vloxseg7:
713  case Intrinsic::riscv_vloxseg8:
714  selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
715  return;
716  case Intrinsic::riscv_vluxseg2:
717  case Intrinsic::riscv_vluxseg3:
718  case Intrinsic::riscv_vluxseg4:
719  case Intrinsic::riscv_vluxseg5:
720  case Intrinsic::riscv_vluxseg6:
721  case Intrinsic::riscv_vluxseg7:
722  case Intrinsic::riscv_vluxseg8:
723  selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
724  return;
725  case Intrinsic::riscv_vloxseg2_mask:
726  case Intrinsic::riscv_vloxseg3_mask:
727  case Intrinsic::riscv_vloxseg4_mask:
728  case Intrinsic::riscv_vloxseg5_mask:
729  case Intrinsic::riscv_vloxseg6_mask:
730  case Intrinsic::riscv_vloxseg7_mask:
731  case Intrinsic::riscv_vloxseg8_mask:
732  selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
733  return;
734  case Intrinsic::riscv_vluxseg2_mask:
735  case Intrinsic::riscv_vluxseg3_mask:
736  case Intrinsic::riscv_vluxseg4_mask:
737  case Intrinsic::riscv_vluxseg5_mask:
738  case Intrinsic::riscv_vluxseg6_mask:
739  case Intrinsic::riscv_vluxseg7_mask:
740  case Intrinsic::riscv_vluxseg8_mask:
741  selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
742  return;
743  case Intrinsic::riscv_vlseg8ff:
744  case Intrinsic::riscv_vlseg7ff:
745  case Intrinsic::riscv_vlseg6ff:
746  case Intrinsic::riscv_vlseg5ff:
747  case Intrinsic::riscv_vlseg4ff:
748  case Intrinsic::riscv_vlseg3ff:
749  case Intrinsic::riscv_vlseg2ff: {
750  selectVLSEGFF(Node, /*IsMasked*/ false);
751  return;
752  }
753  case Intrinsic::riscv_vlseg8ff_mask:
754  case Intrinsic::riscv_vlseg7ff_mask:
755  case Intrinsic::riscv_vlseg6ff_mask:
756  case Intrinsic::riscv_vlseg5ff_mask:
757  case Intrinsic::riscv_vlseg4ff_mask:
758  case Intrinsic::riscv_vlseg3ff_mask:
759  case Intrinsic::riscv_vlseg2ff_mask: {
760  selectVLSEGFF(Node, /*IsMasked*/ true);
761  return;
762  }
763  case Intrinsic::riscv_vloxei:
764  case Intrinsic::riscv_vloxei_mask:
765  case Intrinsic::riscv_vluxei:
766  case Intrinsic::riscv_vluxei_mask: {
767  bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
768  IntNo == Intrinsic::riscv_vluxei_mask;
769  bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
770  IntNo == Intrinsic::riscv_vloxei_mask;
771 
772  MVT VT = Node->getSimpleValueType(0);
773  unsigned ScalarSize = VT.getScalarSizeInBits();
774 
775  unsigned CurOp = 2;
777  if (IsMasked)
778  Operands.push_back(Node->getOperand(CurOp++));
779 
780  MVT IndexVT;
781  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
782  /*IsStridedOrIndexed*/ true, Operands,
783  &IndexVT);
784 
786  "Element count mismatch");
787 
789  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
790  unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
791  const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
792  IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
793  static_cast<unsigned>(IndexLMUL));
795  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
796 
797  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
798  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
799 
800  ReplaceNode(Node, Load);
801  return;
802  }
803  case Intrinsic::riscv_vle1:
804  case Intrinsic::riscv_vle:
805  case Intrinsic::riscv_vle_mask:
806  case Intrinsic::riscv_vlse:
807  case Intrinsic::riscv_vlse_mask: {
808  bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
809  IntNo == Intrinsic::riscv_vlse_mask;
810  bool IsStrided =
811  IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
812 
813  MVT VT = Node->getSimpleValueType(0);
814  unsigned ScalarSize = VT.getScalarSizeInBits();
815  // VLE1 uses an SEW of 8.
816  unsigned SEW = (IntNo == Intrinsic::riscv_vle1) ? 8 : ScalarSize;
817 
818  unsigned CurOp = 2;
820  if (IsMasked)
821  Operands.push_back(Node->getOperand(CurOp++));
822 
823  addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
824  Operands);
825 
827  const RISCV::VLEPseudo *P =
828  RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, ScalarSize,
829  static_cast<unsigned>(LMUL));
831  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
832 
833  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
834  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
835 
836  ReplaceNode(Node, Load);
837  return;
838  }
839  case Intrinsic::riscv_vleff:
840  case Intrinsic::riscv_vleff_mask: {
841  bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
842 
843  MVT VT = Node->getSimpleValueType(0);
844  unsigned ScalarSize = VT.getScalarSizeInBits();
845 
846  unsigned CurOp = 2;
848  if (IsMasked)
849  Operands.push_back(Node->getOperand(CurOp++));
850 
851  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
852  /*IsStridedOrIndexed*/ false, Operands);
853 
855  const RISCV::VLEPseudo *P =
856  RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
857  ScalarSize, static_cast<unsigned>(LMUL));
859  CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
861  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
862  /*Glue*/ SDValue(Load, 2));
863 
864  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
865  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
866 
867  ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
868  ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
869  ReplaceUses(SDValue(Node, 2), SDValue(Load, 1)); // Chain
870  CurDAG->RemoveDeadNode(Node);
871  return;
872  }
873  }
874  break;
875  }
876  case ISD::INTRINSIC_VOID: {
877  unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
878  switch (IntNo) {
879  case Intrinsic::riscv_vsseg2:
880  case Intrinsic::riscv_vsseg3:
881  case Intrinsic::riscv_vsseg4:
882  case Intrinsic::riscv_vsseg5:
883  case Intrinsic::riscv_vsseg6:
884  case Intrinsic::riscv_vsseg7:
885  case Intrinsic::riscv_vsseg8: {
886  selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
887  return;
888  }
889  case Intrinsic::riscv_vsseg2_mask:
890  case Intrinsic::riscv_vsseg3_mask:
891  case Intrinsic::riscv_vsseg4_mask:
892  case Intrinsic::riscv_vsseg5_mask:
893  case Intrinsic::riscv_vsseg6_mask:
894  case Intrinsic::riscv_vsseg7_mask:
895  case Intrinsic::riscv_vsseg8_mask: {
896  selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
897  return;
898  }
899  case Intrinsic::riscv_vssseg2:
900  case Intrinsic::riscv_vssseg3:
901  case Intrinsic::riscv_vssseg4:
902  case Intrinsic::riscv_vssseg5:
903  case Intrinsic::riscv_vssseg6:
904  case Intrinsic::riscv_vssseg7:
905  case Intrinsic::riscv_vssseg8: {
906  selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
907  return;
908  }
909  case Intrinsic::riscv_vssseg2_mask:
910  case Intrinsic::riscv_vssseg3_mask:
911  case Intrinsic::riscv_vssseg4_mask:
912  case Intrinsic::riscv_vssseg5_mask:
913  case Intrinsic::riscv_vssseg6_mask:
914  case Intrinsic::riscv_vssseg7_mask:
915  case Intrinsic::riscv_vssseg8_mask: {
916  selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
917  return;
918  }
919  case Intrinsic::riscv_vsoxseg2:
920  case Intrinsic::riscv_vsoxseg3:
921  case Intrinsic::riscv_vsoxseg4:
922  case Intrinsic::riscv_vsoxseg5:
923  case Intrinsic::riscv_vsoxseg6:
924  case Intrinsic::riscv_vsoxseg7:
925  case Intrinsic::riscv_vsoxseg8:
926  selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
927  return;
928  case Intrinsic::riscv_vsuxseg2:
929  case Intrinsic::riscv_vsuxseg3:
930  case Intrinsic::riscv_vsuxseg4:
931  case Intrinsic::riscv_vsuxseg5:
932  case Intrinsic::riscv_vsuxseg6:
933  case Intrinsic::riscv_vsuxseg7:
934  case Intrinsic::riscv_vsuxseg8:
935  selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
936  return;
937  case Intrinsic::riscv_vsoxseg2_mask:
938  case Intrinsic::riscv_vsoxseg3_mask:
939  case Intrinsic::riscv_vsoxseg4_mask:
940  case Intrinsic::riscv_vsoxseg5_mask:
941  case Intrinsic::riscv_vsoxseg6_mask:
942  case Intrinsic::riscv_vsoxseg7_mask:
943  case Intrinsic::riscv_vsoxseg8_mask:
944  selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
945  return;
946  case Intrinsic::riscv_vsuxseg2_mask:
947  case Intrinsic::riscv_vsuxseg3_mask:
948  case Intrinsic::riscv_vsuxseg4_mask:
949  case Intrinsic::riscv_vsuxseg5_mask:
950  case Intrinsic::riscv_vsuxseg6_mask:
951  case Intrinsic::riscv_vsuxseg7_mask:
952  case Intrinsic::riscv_vsuxseg8_mask:
953  selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
954  return;
955  case Intrinsic::riscv_vsoxei:
956  case Intrinsic::riscv_vsoxei_mask:
957  case Intrinsic::riscv_vsuxei:
958  case Intrinsic::riscv_vsuxei_mask: {
959  bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
960  IntNo == Intrinsic::riscv_vsuxei_mask;
961  bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
962  IntNo == Intrinsic::riscv_vsoxei_mask;
963 
964  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
965  unsigned ScalarSize = VT.getScalarSizeInBits();
966 
967  unsigned CurOp = 2;
969  Operands.push_back(Node->getOperand(CurOp++)); // Store value.
970 
971  MVT IndexVT;
972  addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
973  /*IsStridedOrIndexed*/ true, Operands,
974  &IndexVT);
975 
977  "Element count mismatch");
978 
980  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
981  unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
982  const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
983  IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
984  static_cast<unsigned>(IndexLMUL));
986  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
987 
988  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
989  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
990 
991  ReplaceNode(Node, Store);
992  return;
993  }
994  case Intrinsic::riscv_vse1:
995  case Intrinsic::riscv_vse:
996  case Intrinsic::riscv_vse_mask:
997  case Intrinsic::riscv_vsse:
998  case Intrinsic::riscv_vsse_mask: {
999  bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1000  IntNo == Intrinsic::riscv_vsse_mask;
1001  bool IsStrided =
1002  IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1003 
1004  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1005  unsigned ScalarSize = VT.getScalarSizeInBits();
1006  // VSE1 uses an SEW of 8.
1007  unsigned SEW = (IntNo == Intrinsic::riscv_vse1) ? 8 : ScalarSize;
1008 
1009  unsigned CurOp = 2;
1011  Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1012 
1013  addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
1014  Operands);
1015 
1017  const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1018  IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
1019  MachineSDNode *Store =
1020  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1021  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1022  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1023 
1024  ReplaceNode(Node, Store);
1025  return;
1026  }
1027  }
1028  break;
1029  }
1030  case ISD::BITCAST: {
1031  MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1032  // Just drop bitcasts between vectors if both are fixed or both are
1033  // scalable.
1034  if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1035  (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1036  ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1037  CurDAG->RemoveDeadNode(Node);
1038  return;
1039  }
1040  break;
1041  }
1042  case ISD::INSERT_SUBVECTOR: {
1043  SDValue V = Node->getOperand(0);
1044  SDValue SubV = Node->getOperand(1);
1045  SDLoc DL(SubV);
1046  auto Idx = Node->getConstantOperandVal(2);
1047  MVT SubVecVT = SubV.getSimpleValueType();
1048 
1049  const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1050  MVT SubVecContainerVT = SubVecVT;
1051  // Establish the correct scalable-vector types for any fixed-length type.
1052  if (SubVecVT.isFixedLengthVector())
1053  SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1054  if (VT.isFixedLengthVector())
1055  VT = TLI.getContainerForFixedLengthVector(VT);
1056 
1057  const auto *TRI = Subtarget->getRegisterInfo();
1058  unsigned SubRegIdx;
1059  std::tie(SubRegIdx, Idx) =
1061  VT, SubVecContainerVT, Idx, TRI);
1062 
1063  // If the Idx hasn't been completely eliminated then this is a subvector
1064  // insert which doesn't naturally align to a vector register. These must
1065  // be handled using instructions to manipulate the vector registers.
1066  if (Idx != 0)
1067  break;
1068 
1069  RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1070  bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1071  SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1072  SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1073  (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1074  assert((!IsSubVecPartReg || V.isUndef()) &&
1075  "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1076  "the subvector is smaller than a full-sized register");
1077 
1078  // If we haven't set a SubRegIdx, then we must be going between
1079  // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1080  if (SubRegIdx == RISCV::NoSubRegister) {
1081  unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1083  InRegClassID &&
1084  "Unexpected subvector extraction");
1085  SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1086  SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1087  DL, VT, SubV, RC);
1088  ReplaceNode(Node, NewNode);
1089  return;
1090  }
1091 
1092  SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1093  ReplaceNode(Node, Insert.getNode());
1094  return;
1095  }
1096  case ISD::EXTRACT_SUBVECTOR: {
1097  SDValue V = Node->getOperand(0);
1098  auto Idx = Node->getConstantOperandVal(1);
1099  MVT InVT = V.getSimpleValueType();
1100  SDLoc DL(V);
1101 
1102  const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1103  MVT SubVecContainerVT = VT;
1104  // Establish the correct scalable-vector types for any fixed-length type.
1105  if (VT.isFixedLengthVector())
1106  SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1107  if (InVT.isFixedLengthVector())
1108  InVT = TLI.getContainerForFixedLengthVector(InVT);
1109 
1110  const auto *TRI = Subtarget->getRegisterInfo();
1111  unsigned SubRegIdx;
1112  std::tie(SubRegIdx, Idx) =
1114  InVT, SubVecContainerVT, Idx, TRI);
1115 
1116  // If the Idx hasn't been completely eliminated then this is a subvector
1117  // extract which doesn't naturally align to a vector register. These must
1118  // be handled using instructions to manipulate the vector registers.
1119  if (Idx != 0)
1120  break;
1121 
1122  // If we haven't set a SubRegIdx, then we must be going between
1123  // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1124  if (SubRegIdx == RISCV::NoSubRegister) {
1125  unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1127  InRegClassID &&
1128  "Unexpected subvector extraction");
1129  SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1130  SDNode *NewNode =
1131  CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1132  ReplaceNode(Node, NewNode);
1133  return;
1134  }
1135 
1136  SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1137  ReplaceNode(Node, Extract.getNode());
1138  return;
1139  }
1140  case RISCVISD::VMV_V_X_VL:
1141  case RISCVISD::VFMV_V_F_VL: {
1142  // Try to match splat of a scalar load to a strided load with stride of x0.
1143  SDValue Src = Node->getOperand(0);
1144  auto *Ld = dyn_cast<LoadSDNode>(Src);
1145  if (!Ld)
1146  break;
1147  EVT MemVT = Ld->getMemoryVT();
1148  // The memory VT should be the same size as the element type.
1149  if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1150  break;
1151  if (!IsProfitableToFold(Src, Node, Node) ||
1152  !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1153  break;
1154 
1155  SDValue VL;
1156  selectVLOp(Node->getOperand(1), VL);
1157 
1158  unsigned ScalarSize = VT.getScalarSizeInBits();
1159  SDValue SEW = CurDAG->getTargetConstant(Log2_32(ScalarSize), DL, XLenVT);
1160 
1161  SDValue Operands[] = {Ld->getBasePtr(),
1162  CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1163  Ld->getChain()};
1164 
1166  const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1167  /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, ScalarSize,
1168  static_cast<unsigned>(LMUL));
1169  MachineSDNode *Load =
1170  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1171 
1172  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1173  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1174 
1175  ReplaceNode(Node, Load);
1176  return;
1177  }
1178  }
1179 
1180  // Select the default instruction.
1181  SelectCode(Node);
1182 }
1183 
// RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand — selects an inline-asm
// memory operand. Returns false when the operand is accepted (pushed onto
// OutOps unchanged) and true when the constraint is unsupported.
// NOTE(review): the opening signature line and the two `case` labels of the
// switch were dropped by the page extraction; per the index, the definition
// starts at RISCVISelDAGToDAG.cpp:1184 — restore from upstream sources.
1185  const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1186  switch (ConstraintID) {
// (extraction dropped the `case` label for the plain memory constraint here)
1188  // We just support simple memory operands that have a single address
1189  // operand and need no special handling.
1190  OutOps.push_back(Op);
1191  return false;
// (extraction dropped a second supported-constraint `case` label here)
1193  OutOps.push_back(Op);
1194  return false;
1195  default:
1196  break;
1197  }
1198 
// Any other constraint kind is not handled by this target.
1199  return true;
1200 }
1201 
// RISCVDAGToDAGISel::SelectAddrFI — address-matching helper that succeeds
// only for frame indices, producing the corresponding TargetFrameIndex in
// Base. NOTE(review): the signature line (taking `SDValue Addr, SDValue
// &Base` per the index entry at RISCVISelDAGToDAG.cpp:1202) was dropped by
// the page extraction.
1203  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1204  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1205  return true;
1206  }
// Not a frame index: let other addressing patterns handle it.
1207  return false;
1208 }
1209 
// RISCVDAGToDAGISel::SelectBaseAddr — always succeeds: a frame index becomes
// a TargetFrameIndex, anything else is passed through to be selected as a
// register. NOTE(review): the signature line (index entry at
// RISCVISelDAGToDAG.cpp:1210) was dropped by the page extraction.
1211  // If this is FrameIndex, select it directly. Otherwise just let it get
1212  // selected to a register independently.
1213  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1214  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT())
1215  return true;
// (see note above: `else Base = Addr;` follows in the original)
1219 
// RISCVDAGToDAGISel::selectShiftMask — strips a redundant AND from a shift
// amount when the mask covers all bits a ShiftWidth-bit shifter reads.
// Always succeeds; ShAmt is either the unmasked value or N itself.
// NOTE(review): the first signature line (`bool
// RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,` per the
// index entry at line 1220) was dropped by the page extraction.
1221  SDValue &ShAmt) {
1222  // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1223  // amount. If there is an AND on the shift amount, we can bypass it if it
1224  // doesn't affect any of those bits.
1225  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1226  const APInt &AndMask = N->getConstantOperandAPInt(1);
1227 
1228  // Since the max shift amount is a power of 2 we can subtract 1 to make a
1229  // mask that covers the bits needed to represent all shift amounts.
1230  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1231  APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1232 
// If the AND keeps every bit the shifter reads, it is a no-op: drop it.
1233  if (ShMask.isSubsetOf(AndMask)) {
1234  ShAmt = N.getOperand(0);
1235  return true;
1236  }
1237 
1238  // SimplifyDemandedBits may have optimized the mask so try restoring any
1239  // bits that are known zero.
1240  KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1241  if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1242  ShAmt = N.getOperand(0);
1243  return true;
1244  }
1245  }
1246 
// Fallback: use the original value as the shift amount.
1247  ShAmt = N;
1248  return true;
1249 }
1250 
// RISCVDAGToDAGISel::selectSExti32 — matches values already known to be
// sign-extended from i32, so the extension node can be elided. On success,
// Val is the (possibly peeled) value; returns false if no proof is found.
// NOTE(review): the signature line was dropped by the page extraction;
// presumably `bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val)`
// — confirm against upstream sources.
1252  if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1253  cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
// Explicit sext_inreg from i32: the input is already what we want.
1254  Val = N.getOperand(0);
1255  return true;
1256  }
1257  // FIXME: Should we just call computeNumSignBits here?
// AssertSext from <= 32 bits implies the upper bits are sign bits of bit 31.
1258  if (N.getOpcode() == ISD::AssertSext &&
1259  cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32)) {
1260  Val = N;
1261  return true;
1262  }
// AssertZext strictly narrower than i32: bit 31 is known zero, so the value
// is also sign-extended from i32.
1263  if (N.getOpcode() == ISD::AssertZext &&
1264  cast<VTSDNode>(N->getOperand(1))->getVT().bitsLT(MVT::i32)) {
1265  Val = N;
1266  return true;
1267  }
1268 
1269  return false;
1270 }
1271 
// RISCVDAGToDAGISel::selectZExti32 — matches values already known to be
// zero-extended from i32 (index entry: RISCVISelDAGToDAG.cpp:1272), so the
// explicit zero-extension can be elided. NOTE(review): the signature line was
// dropped by the page extraction.
1273  if (N.getOpcode() == ISD::AND) {
1274  auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
// An AND with the exact 0xFFFFFFFF mask is itself the zero-extension; peel it.
1275  if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1276  Val = N.getOperand(0);
1277  return true;
1278  }
1279  }
1280  // FIXME: Should we just call computeKnownBits here?
// AssertZext from <= 32 bits guarantees the upper bits are already zero.
1281  if (N.getOpcode() == ISD::AssertZext &&
1282  cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32)) {
1283  Val = N;
1284  return true;
1285  }
1286 
1287  return false;
1288 }
1289 
1290 // Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
1291 // on RV64).
1292 // SLLIUW is the same as SLLI except for the fact that it clears the bits
1293 // XLEN-1:32 of the input RS1 before shifting.
1294 // A PatFrag has already checked that it has the right structure:
1295 //
1296 // (AND (SHL RS1, VC2), VC1)
1297 //
1298 // We check that VC2, the shamt is less than 32, otherwise the pattern is
1299 // exactly the same as SLLI and we give priority to that.
1300 // Eventually we check that VC1, the mask used to clear the upper 32 bits
1301 // of RS1, is correct:
1302 //
1303 // VC1 == (0xFFFFFFFF << VC2)
1304 //
// RISCVDAGToDAGISel::MatchSLLIUW — see the comment block above for the full
// pattern description: verifies (AND (SHL RS1, VC2), VC1) matches SLLIUW.
// NOTE(review): the signature line (`bool RISCVDAGToDAGISel::MatchSLLIUW(
// SDNode *N) const`, per the index entry at line 1305) was dropped by the
// page extraction.
1306  assert(N->getOpcode() == ISD::AND);
1307  assert(N->getOperand(0).getOpcode() == ISD::SHL);
1308  assert(isa<ConstantSDNode>(N->getOperand(1)));
1309  assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
1310 
1311  // The IsRV64 predicate is checked after PatFrag predicates so we can get
1312  // here even on RV32.
1313  if (!Subtarget->is64Bit())
1314  return false;
1315 
1316  SDValue Shl = N->getOperand(0);
1317  uint64_t VC1 = N->getConstantOperandVal(1);
1318  uint64_t VC2 = Shl.getConstantOperandVal(1);
1319 
1320  // Immediate range should be enforced by uimm5 predicate.
1321  assert(VC2 < 32 && "Unexpected immediate");
// The mask must be exactly 0xFFFFFFFF shifted left by the shamt, i.e. it
// clears precisely bits XLEN-1:32 of the pre-shift value.
1322  return (VC1 >> VC2) == UINT64_C(0xFFFFFFFF);
1323 }
1324 
1325 // Select VL as a 5 bit immediate or a value that will become a register. This
1326 // allows us to choose betwen VSETIVLI or VSETVLI later.
// RISCVDAGToDAGISel::selectVLOp — see the comment above: selects VL as a
// 5-bit immediate (enabling VSETIVLI) or leaves it as a register operand.
// Always succeeds. NOTE(review): the signature line (index entry at
// RISCVISelDAGToDAG.cpp:1327) was dropped by the page extraction.
1328  auto *C = dyn_cast<ConstantSDNode>(N);
// uimm5 constants can be encoded directly in VSETIVLI.
1329  if (C && isUInt<5>(C->getZExtValue()))
1330  VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1331  N->getValueType(0));
1332  else
1333  VL = N;
1334 
1335  return true;
1336 }
1337 
// RISCVDAGToDAGISel::selectVSplat — matches any of the three splat node
// forms and extracts the broadcast scalar into SplatVal. NOTE(review): the
// signature line (index entry at RISCVISelDAGToDAG.cpp:1338) was dropped by
// the page extraction.
1339  if (N.getOpcode() != ISD::SPLAT_VECTOR &&
1340  N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1341  N.getOpcode() != RISCVISD::VMV_V_X_VL)
1342  return false;
1343  SplatVal = N.getOperand(0);
1344  return true;
1345 }
1346 
1347 using ValidateFn = bool (*)(int64_t);
1348 
1349 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1350  SelectionDAG &DAG,
1351  const RISCVSubtarget &Subtarget,
1352  ValidateFn ValidateImm) {
1353  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1354  N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1355  N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1356  !isa<ConstantSDNode>(N.getOperand(0)))
1357  return false;
1358 
1359  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1360 
1361  // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
1362  // share semantics when the operand type is wider than the resulting vector
1363  // element type: an implicit truncation first takes place. Therefore, perform
1364  // a manual truncation/sign-extension in order to ignore any truncated bits
1365  // and catch any zero-extended immediate.
1366  // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1367  // sign-extending to (XLenVT -1).
1368  MVT XLenVT = Subtarget.getXLenVT();
1369  assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
1370  "Unexpected splat operand type");
1371  MVT EltVT = N.getSimpleValueType().getVectorElementType();
1372  if (EltVT.bitsLT(XLenVT))
1373  SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1374 
1375  if (!ValidateImm(SplatImm))
1376  return false;
1377 
1378  SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1379  return true;
1380 }
1381 
// RISCVDAGToDAGISel::selectVSplatSimm5 — matches a splat of a simm5 (index
// entry at RISCVISelDAGToDAG.cpp:1382). NOTE(review): the signature line was
// dropped by the page extraction.
1383  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1384  [](int64_t Imm) { return isInt<5>(Imm); });
1385 }
1386 
// RISCVDAGToDAGISel::selectVSplatSimm5Plus1 — matches a splat in the range
// simm5 shifted up by one, i.e. [-15, 16] (index entry at
// RISCVISelDAGToDAG.cpp:1387). NOTE(review): the signature line was dropped
// by the page extraction.
1388  return selectVSplatSimmHelper(
1389  N, SplatVal, *CurDAG, *Subtarget,
1390  [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1391 }
1392 
// RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero — like
// selectVSplatSimm5Plus1 but additionally rejects zero (index entry at
// RISCVISelDAGToDAG.cpp:1393). NOTE(review): the first signature line was
// dropped by the page extraction.
1394  SDValue &SplatVal) {
1395  return selectVSplatSimmHelper(
1396  N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1397  return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1398  });
1399 }
1400 
// RISCVDAGToDAGISel::selectVSplatUimm5 — matches a splat of an unsigned
// 5-bit immediate (index entry at RISCVISelDAGToDAG.cpp:1401). Unlike the
// simm5 helpers, no element-width re-sign-extension is performed here.
// NOTE(review): the signature line was dropped by the page extraction.
1402  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1403  N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1404  N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1405  !isa<ConstantSDNode>(N.getOperand(0)))
1406  return false;
1407 
1408  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1409 
1410  if (!isUInt<5>(SplatImm))
1411  return false;
1412 
// Materialize the immediate as an XLenVT target constant.
1413  SplatVal =
1414  CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
1415 
1416  return true;
1417 }
1418 
// Matches a bare constant N that, sign-extended from `Width` bits, fits in a
// simm5; on success Imm receives it as an XLenVT target constant.
// NOTE(review): the signature line was dropped by the page extraction;
// presumably this is RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned
// Width, SDValue &Imm) given the `Width` use below — confirm upstream.
1420  SDValue &Imm) {
1421  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
// Truncate/sign-extend to the element width before the simm5 range check.
1422  int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
1423 
1424  if (!isInt<5>(ImmVal))
1425  return false;
1426 
1427  Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
1428  return true;
1429  }
1430 
1431  return false;
1432 }
1433 
1434 // Merge an ADDI into the offset of a load/store instruction where possible.
1435 // (load (addi base, off1), off2) -> (load base, off1+off2)
1436 // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
1437 // This is possible when off1+off2 fits a 12-bit immediate.
1438 void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
// NOTE(review): the iterator initialization line (1439) was dropped by the
// page extraction; the ++/-- usage below implies Position is an allnodes
// iterator starting at CurDAG->allnodes_end() — confirm upstream. The walk
// is performed back-to-front over all DAG nodes.
1440  ++Position;
1441 
1442  while (Position != CurDAG->allnodes_begin()) {
1443  SDNode *N = &*--Position;
1444  // Skip dead nodes and any non-machine opcodes.
1445  if (N->use_empty() || !N->isMachineOpcode())
1446  continue;
1447 
1448  int OffsetOpIdx;
1449  int BaseOpIdx;
1450 
1451  // Only attempt this optimisation for I-type loads and S-type stores.
1452  switch (N->getMachineOpcode()) {
1453  default:
1454  continue;
// Loads: operand 0 is the base register, operand 1 the immediate offset.
1455  case RISCV::LB:
1456  case RISCV::LH:
1457  case RISCV::LW:
1458  case RISCV::LBU:
1459  case RISCV::LHU:
1460  case RISCV::LWU:
1461  case RISCV::LD:
1462  case RISCV::FLH:
1463  case RISCV::FLW:
1464  case RISCV::FLD:
1465  BaseOpIdx = 0;
1466  OffsetOpIdx = 1;
1467  break;
// Stores: operand 0 is the stored value, so base/offset shift by one.
1468  case RISCV::SB:
1469  case RISCV::SH:
1470  case RISCV::SW:
1471  case RISCV::SD:
1472  case RISCV::FSH:
1473  case RISCV::FSW:
1474  case RISCV::FSD:
1475  BaseOpIdx = 1;
1476  OffsetOpIdx = 2;
1477  break;
1478  }
1479 
// The mem-op's own offset must be a constant for the fold to make sense.
1480  if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
1481  continue;
1482 
1483  SDValue Base = N->getOperand(BaseOpIdx);
1484 
1485  // If the base is an ADDI, we can merge it in to the load/store.
1486  if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
1487  continue;
1488 
1489  SDValue ImmOperand = Base.getOperand(1);
1490  uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);
1491 
// Three foldable ADDI-immediate shapes: plain constant, global address
// lo-part, and constant-pool address. Anything else bails out.
1492  if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
1493  int64_t Offset1 = Const->getSExtValue();
1494  int64_t CombinedOffset = Offset1 + Offset2;
// The merged offset must still encode as a 12-bit signed immediate.
1495  if (!isInt<12>(CombinedOffset))
1496  continue;
1497  ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
1498  ImmOperand.getValueType());
1499  } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
1500  // If the off1 in (addi base, off1) is a global variable's address (its
1501  // low part, really), then we can rely on the alignment of that variable
1502  // to provide a margin of safety before off1 can overflow the 12 bits.
1503  // Check if off2 falls within that margin; if so off1+off2 can't overflow.
1504  const DataLayout &DL = CurDAG->getDataLayout();
1505  Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
1506  if (Offset2 != 0 && Alignment <= Offset2)
1507  continue;
1508  int64_t Offset1 = GA->getOffset();
1509  int64_t CombinedOffset = Offset1 + Offset2;
1510  ImmOperand = CurDAG->getTargetGlobalAddress(
1511  GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
1512  CombinedOffset, GA->getTargetFlags());
1513  } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
1514  // Ditto.
1515  Align Alignment = CP->getAlign();
1516  if (Offset2 != 0 && Alignment <= Offset2)
1517  continue;
1518  int64_t Offset1 = CP->getOffset();
1519  int64_t CombinedOffset = Offset1 + Offset2;
1520  ImmOperand = CurDAG->getTargetConstantPool(
1521  CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
1522  CombinedOffset, CP->getTargetFlags());
1523  } else {
1524  continue;
1525  }
1526 
1527  LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
1528  LLVM_DEBUG(Base->dump(CurDAG));
1529  LLVM_DEBUG(dbgs() << "\nN: ");
1530  LLVM_DEBUG(N->dump(CurDAG));
1531  LLVM_DEBUG(dbgs() << "\n");
1532 
1533  // Modify the offset operand of the load/store.
1534  if (BaseOpIdx == 0) // Load
1535  CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
1536  N->getOperand(2));
1537  else // Store
1538  CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
1539  ImmOperand, N->getOperand(3));
1540 
1541  // The add-immediate may now be dead, in which case remove it.
1542  if (Base.getNode()->use_empty())
1543  CurDAG->RemoveDeadNode(Base.getNode());
1544  }
1545 }
1546 
1547 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
1548 // for instruction scheduling.
// NOTE(review): the factory's signature line (1549) was dropped by the page
// extraction; presumably `FunctionPass *llvm::createRISCVISelDag(
// RISCVTargetMachine &TM)` given the constructor call below — confirm
// upstream.
1550  return new RISCVDAGToDAGISel(TM);
1551 }
llvm::RISCVII::LMUL_4
@ LMUL_4
Definition: RISCVBaseInfo.h:92
llvm::TargetMachine::getOptLevel
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Definition: TargetMachine.cpp:198
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:19
llvm::MVT::getVectorElementType
MVT getVectorElementType() const
Definition: MachineValueType.h:502
llvm::MVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: MachineValueType.h:1024
llvm::ISD::INTRINSIC_VOID
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:192
llvm::RISCVDAGToDAGISel::selectVLXSEG
void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
Definition: RISCVISelDAGToDAG.cpp:248
MathExtras.h
llvm::SelectionDAGISel::TLI
const TargetLowering * TLI
Definition: SelectionDAGISel.h:53
llvm
Definition: AllocatorList.h:23
llvm::RISCVDAGToDAGISel::addVectorLoadStoreOperands
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, MVT *IndexVT=nullptr)
Definition: RISCVISelDAGToDAG.cpp:127
llvm::SelectionDAGISel::TM
TargetMachine & TM
Definition: SelectionDAGISel.h:41
llvm::RISCV::VLSEGPseudo
Definition: RISCVISelDAGToDAG.h:95
llvm::SDLoc
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Definition: SelectionDAGNodes.h:1078
llvm::MVT::isFixedLengthVector
bool isFixedLengthVector() const
Definition: MachineValueType.h:361
llvm::RISCVDAGToDAGISel::selectVSplatSimm5Plus1
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1387
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
llvm::ISD::BITCAST
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:838
Insert
Vector Rotate Left Mask Mask Insert
Definition: README_P9.txt:112
llvm::RISCVSubtarget::getTargetLowering
const RISCVTargetLowering * getTargetLowering() const override
Definition: RISCVSubtarget.h:97
llvm::ISD::AssertSext
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:59
llvm::SelectionDAG::getCopyToReg
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:735
llvm::RISCV::VLXSEGPseudo
Definition: RISCVISelDAGToDAG.h:105
llvm::SDValue::getNode
SDNode * getNode() const
get the SDNode which holds the desired result
Definition: SelectionDAGNodes.h:152
llvm::RISCVDAGToDAGISel::selectZExti32
bool selectZExti32(SDValue N, SDValue &Val)
Definition: RISCVISelDAGToDAG.cpp:1272
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
llvm::KnownBits::Zero
APInt Zero
Definition: KnownBits.h:24
llvm::MVT::bitsLT
bool bitsLT(MVT VT) const
Return true if this has less bits than VT.
Definition: MachineValueType.h:1080
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::MachineSDNode
An SDNode that represents everything that will be needed to construct a MachineInstr.
Definition: SelectionDAGNodes.h:2518
llvm::SelectionDAG::allnodes_begin
allnodes_const_iterator allnodes_begin() const
Definition: SelectionDAG.h:493
llvm::SelectionDAG::getRoot
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
Definition: SelectionDAG.h:513
llvm::SPII::Load
@ Load
Definition: SparcInstrInfo.h:32
llvm::SDNode
Represents one node in the SelectionDAG.
Definition: SelectionDAGNodes.h:455
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::RISCVDAGToDAGISel::selectVSplat
bool selectVSplat(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1338
llvm::MVT::Glue
@ Glue
Definition: MachineValueType.h:249
llvm::MemOp
Definition: TargetLowering.h:111
llvm::RISCVDAGToDAGISel
Definition: RISCVISelDAGToDAG.h:23
llvm::RISCVII::LMUL_1
@ LMUL_1
Definition: RISCVBaseInfo.h:90
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1581
llvm::RISCV::VLX_VSXPseudo
Definition: RISCVISelDAGToDAG.h:151
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::RISCVTargetLowering::getRegClassIDForVecVT
static unsigned getRegClassIDForVecVT(MVT VT)
Definition: RISCVISelLowering.cpp:1103
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::RISCVII::LMUL_2
@ LMUL_2
Definition: RISCVBaseInfo.h:91
llvm::RISCVDAGToDAGISel::selectVSSEG
void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided)
Definition: RISCVISelDAGToDAG.cpp:295
RISCVMatInt.h
llvm::BitmaskEnumDetail::Mask
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1567
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:122
llvm::RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1393
llvm::RISCVDAGToDAGISel::SelectBaseAddr
bool SelectBaseAddr(SDValue Addr, SDValue &Base)
Definition: RISCVISelDAGToDAG.cpp:1210
KnownBits.h
llvm::MVT::isScalableVector
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
Definition: MachineValueType.h:356
llvm::SelectionDAG::getRegister
SDValue getRegister(unsigned Reg, EVT VT)
Definition: SelectionDAG.cpp:1953
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
llvm::RISCVSubtarget::is64Bit
bool is64Bit() const
Definition: RISCVSubtarget.h:125
llvm::RISCV::VSSEGPseudo
Definition: RISCVISelDAGToDAG.h:115
llvm::EVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: ValueTypes.h:355
llvm::RISCVDAGToDAGISel::selectShiftMask
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
Definition: RISCVISelDAGToDAG.cpp:1220
llvm::SelectionDAG::getTargetFrameIndex
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:688
llvm::SDValue::getValueType
EVT getValueType() const
Return the ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:1113
llvm::SelectionDAG
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:216
llvm::SelectionDAG::UpdateNodeOperands
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
Definition: SelectionDAG.cpp:8081
llvm::ISD::Constant
@ Constant
Definition: ISDOpcodes.h:69
llvm::ISD::SIGN_EXTEND_INREG
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:737
llvm::RISCVISD::SPLAT_VECTOR_I64
@ SPLAT_VECTOR_I64
Definition: RISCVISelLowering.h:126
createTuple
static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF, RISCVII::VLMUL LMUL)
Definition: RISCVISelDAGToDAG.cpp:110
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:35
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, bool IsRV64)
Definition: RISCVMatInt.cpp:78
llvm::MVT::getScalarSizeInBits
uint64_t getScalarSizeInBits() const
Definition: MachineValueType.h:1014
llvm::RISCVTargetLowering::getSubregIndexByMVT
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index)
Definition: RISCVISelLowering.cpp:1080
llvm::RISCVDAGToDAGISel::SelectAddrFI
bool SelectAddrFI(SDValue Addr, SDValue &Base)
Definition: RISCVISelDAGToDAG.cpp:1202
llvm::RISCVSubtarget::getXLenVT
MVT getXLenVT() const
Definition: RISCVSubtarget.h:130
RISCVISelDAGToDAG.h
llvm::SelectionDAGISel::ReplaceNode
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
Definition: SelectionDAGISel.h:227
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:596
llvm::RISCVVType::decodeVSEW
static unsigned decodeVSEW(unsigned VSEW)
Definition: RISCVBaseInfo.h:329
RISCVMCTargetDesc.h
llvm::SDNode::use_empty
bool use_empty() const
Return true if there are no uses of this node.
Definition: SelectionDAGNodes.h:689
llvm::RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand
bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
Definition: RISCVISelDAGToDAG.cpp:1184
createM1Tuple
static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF)
Definition: RISCVISelDAGToDAG.cpp:85
llvm::RISCVSubtarget::hasStdExtZbb
bool hasStdExtZbb() const
Definition: RISCVSubtarget.h:111
llvm::ISD::AND
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:622
llvm::ISD::SPLAT_VECTOR
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:584
llvm::RISCVSubtarget::hasStdExtZbp
bool hasStdExtZbp() const
Definition: RISCVSubtarget.h:116
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::SDValue::getConstantOperandVal
uint64_t getConstantOperandVal(unsigned i) const
Definition: SelectionDAGNodes.h:1125
llvm::SelectionDAG::getTargetGlobalAddress
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:683
Operands
mir Rename Register Operands
Definition: MIRNamerPass.cpp:78
llvm::APInt::isSubsetOf
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition: APInt.h:1349
createM2Tuple
static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF)
Definition: RISCVISelDAGToDAG.cpp:95
llvm::SelectionDAG::RemoveDeadNode
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
Definition: SelectionDAG.cpp:855
llvm::RISCV::VSEPseudo
Definition: RISCVISelDAGToDAG.h:143
llvm::RISCVDAGToDAGISel::selectVLOp
bool selectVLOp(SDValue N, SDValue &VL)
Definition: RISCVISelDAGToDAG.cpp:1327
llvm::RISCVDAGToDAGISel::selectVSXSEG
void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
Definition: RISCVISelDAGToDAG.cpp:327
llvm::RISCVII::LMUL_F4
@ LMUL_F4
Definition: RISCVBaseInfo.h:96
llvm::SelectionDAGISel::IsProfitableToFold
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
Definition: SelectionDAGISel.cpp:2194
llvm::RISCVII::VLMUL
VLMUL
Definition: RISCVBaseInfo.h:89
selectVSplatSimmHelper
static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, ValidateFn ValidateImm)
Definition: RISCVISelDAGToDAG.cpp:1349
llvm::RISCVDAGToDAGISel::selectVSplatUimm5
bool selectVSplatUimm5(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1401
llvm::RISCVDAGToDAGISel::MatchSLLIUW
bool MatchSLLIUW(SDNode *N) const
Definition: RISCVISelDAGToDAG.cpp:1305
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:80
llvm::ISD::AssertZext
@ AssertZext
Definition: ISDOpcodes.h:60
llvm::SelectionDAG::getCopyFromReg
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:761
llvm::SelectionDAGISel::IsLegalToFold
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOpt::Level OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
Definition: SelectionDAGISel.cpp:2202
llvm::SDNode::getOperand
const SDValue & getOperand(unsigned Num) const
Definition: SelectionDAGNodes.h:896
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::SDNode::dump
void dump() const
Dump this node, for debugging.
Definition: SelectionDAGDumper.cpp:537
llvm::RISCVII::LMUL_F8
@ LMUL_F8
Definition: RISCVBaseInfo.h:95
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::SDValue::getValue
SDValue getValue(unsigned R) const
Definition: SelectionDAGNodes.h:172
llvm::RISCVDAGToDAGISel::selectVLSEG
void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided)
Definition: RISCVISelDAGToDAG.cpp:163
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
createTupleImpl
static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned RegClassID, unsigned SubReg0)
Definition: RISCVISelDAGToDAG.cpp:67
llvm::MVT::Other
@ Other
Definition: MachineValueType.h:39
llvm::MVT::getSizeInBits
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
Definition: MachineValueType.h:823
llvm::SelectionDAGISel::CurDAG
SelectionDAG * CurDAG
Definition: SelectionDAGISel.h:47
llvm::SelectionDAG::getMachineNode
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
Definition: SelectionDAG.cpp:8429
llvm::InlineAsm::Constraint_A
@ Constraint_A
Definition: InlineAsm.h:250
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:30
llvm::RISCVDAGToDAGISel::selectVSplatSimm5
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1382
llvm::SelectionDAG::setNodeMemRefs
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
Definition: SelectionDAG.cpp:8197
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:70
llvm::RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
Definition: RISCVISelLowering.cpp:1115
llvm::SelectionDAG::getTargetConstantPool
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=None, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:699
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::createRISCVISelDag
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM)
Definition: RISCVISelDAGToDAG.cpp:1549
llvm::MVT::i64
@ i64
Definition: MachineValueType.h:44
llvm::SelectionDAG::getTargetInsertSubreg
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
Definition: SelectionDAG.cpp:8557
llvm::RISCVISD::VMV_V_X_VL
@ VMV_V_X_VL
Definition: RISCVISelLowering.h:113
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:94
llvm::SDValue::getMachineOpcode
unsigned getMachineOpcode() const
Definition: SelectionDAGNodes.h:1145
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::X86ISD::FLD
@ FLD
This instruction implements an extending load to FP stack slots.
Definition: X86ISelLowering.h:813
llvm::SDValue::getOperand
const SDValue & getOperand(unsigned i) const
Definition: SelectionDAGNodes.h:1121
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::SDValue::hasOneUse
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
Definition: SelectionDAGNodes.h:1157
llvm::SDValue::getSimpleValueType
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:183
selectImm
static SDNode * selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm, MVT XLenVT)
Definition: RISCVISelDAGToDAG.cpp:47
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:777
llvm::SPII::Store
@ Store
Definition: SparcInstrInfo.h:33
llvm::ISD::INTRINSIC_WO_CHAIN
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:177
llvm::MVT::getVectorElementCount
ElementCount getVectorElementCount() const
Definition: MachineValueType.h:809
createM4Tuple
static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF)
Definition: RISCVISelDAGToDAG.cpp:104
llvm::ISD::FrameIndex
@ FrameIndex
Definition: ISDOpcodes.h:73
llvm::ISD::INSERT_SUBVECTOR
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
Definition: ISDOpcodes.h:529
llvm::HexagonISD::CP
@ CP
Definition: HexagonISelLowering.h:53
Alignment.h
llvm::SelectionDAG::computeKnownBits
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
Definition: SelectionDAG.cpp:2725
llvm::KnownBits
Definition: KnownBits.h:23
llvm::ISD::EXTRACT_SUBVECTOR
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition: ISDOpcodes.h:543
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:314
llvm::RISCV::VSXSEGPseudo
Definition: RISCVISelDAGToDAG.h:124
RISCVISelLowering.h
llvm::RISCVDAGToDAGISel::PostprocessISelDAG
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
Definition: RISCVISelDAGToDAG.cpp:43
llvm::ilist_iterator
Iterator for intrusive lists based on ilist_node.
Definition: ilist_iterator.h:57
MachineFrameInfo.h
llvm::SelectionDAG::getEntryNode
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:516
llvm::RISCVVType::encodeVTYPE
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
Definition: RISCVBaseInfo.cpp:110
llvm::SelectionDAG::getDataLayout
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:440
llvm::RISCVDAGToDAGISel::selectVLSEGFF
void selectVLSEGFF(SDNode *Node, bool IsMasked)
Definition: RISCVISelDAGToDAG.cpp:204
llvm::SelectionDAG::getTargetExtractSubreg
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
Definition: SelectionDAG.cpp:8547
llvm::SelectionDAGISel::ReplaceUses
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
Definition: SelectionDAGISel.h:206
llvm::RISCVII::LMUL_8
@ LMUL_8
Definition: RISCVBaseInfo.h:93
llvm::MVT::i32
@ i32
Definition: MachineValueType.h:43
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:131
llvm::SDValue
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
Definition: SelectionDAGNodes.h:138
llvm::InlineAsm::Constraint_m
@ Constraint_m
Definition: InlineAsm.h:247
llvm::RISCVTargetLowering
Definition: RISCVISelLowering.h:269
llvm::XCoreISD::LMUL
@ LMUL
Definition: XCoreISelLowering.h:59
llvm::RISCVTargetLowering::getLMUL
static RISCVII::VLMUL getLMUL(MVT VT)
Definition: RISCVISelLowering.cpp:1036
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:403
llvm::RISCVISD::VFMV_V_F_VL
@ VFMV_V_F_VL
Definition: RISCVISelLowering.h:116
llvm::SDValue::isUndef
bool isUndef() const
Definition: SelectionDAGNodes.h:1149
llvm::ISD::SHL
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:647
N
#define N
llvm::RISCVII::LMUL_F2
@ LMUL_F2
Definition: RISCVBaseInfo.h:97
llvm::ISD::SRL
@ SRL
Definition: ISDOpcodes.h:649
llvm::RISCVDAGToDAGISel::selectRVVSimm5
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
Definition: RISCVISelDAGToDAG.cpp:1419
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
llvm::RISCVDAGToDAGISel::Select
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
Definition: RISCVISelDAGToDAG.cpp:365
llvm::MVT::Untyped
@ Untyped
Definition: MachineValueType.h:253
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::SDValue::getOpcode
unsigned getOpcode() const
Definition: SelectionDAGNodes.h:1109
llvm::SelectionDAG::getTargetConstant
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:637
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::ISD::INTRINSIC_W_CHAIN
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:185
raw_ostream.h
llvm::SDValue::isMachineOpcode
bool isMachineOpcode() const
Definition: SelectionDAGNodes.h:1141
llvm::RISCV::VLEPseudo
Definition: RISCVISelDAGToDAG.h:134
ValidateFn
bool(*)(int64_t) ValidateFn
Definition: RISCVISelDAGToDAG.cpp:1347
Debug.h
llvm::RISCVDAGToDAGISel::selectSExti32
bool selectSExti32(SDValue N, SDValue &Val)
Definition: RISCVISelDAGToDAG.cpp:1251
llvm::RISCVSubtarget::hasStdExtV
bool hasStdExtV() const
Definition: RISCVSubtarget.h:121