//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISCV target.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-isel"

namespace llvm {
namespace RISCV {
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCV
} // namespace llvm

void RISCVDAGToDAGISel::PreprocessISelDAG() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
                                       E = CurDAG->allnodes_end();
       I != E;) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
    // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
    if (N->getOpcode() == ISD::SPLAT_VECTOR) {
      MVT VT = N->getSimpleValueType(0);
      unsigned Opc =
          VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
      SDLoc DL(N);
      SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
      SDValue Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
                                       N->getOperand(0), VL);

      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }

    // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
    // load. Done after lowering and combining so that we have a chance to
    // optimize this to VMV_V_X_VL when the upper bits aren't needed.
    if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
      continue;

    assert(N->getNumOperands() == 4 && "Unexpected number of operands");
    MVT VT = N->getSimpleValueType(0);
    SDValue Passthru = N->getOperand(0);
    SDValue Lo = N->getOperand(1);
    SDValue Hi = N->getOperand(2);
    SDValue VL = N->getOperand(3);
    assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
           Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
           "Unexpected VTs!");
    MachineFunction &MF = CurDAG->getMachineFunction();
    RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
    SDLoc DL(N);

    // We use the same frame index we use for moving two i32s into 64-bit FPR.
    // This is an analogous operation.
    int FI = FuncInfo->getMoveF64FrameIndex(MF);
    MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    SDValue StackSlot =
        CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));

    SDValue Chain = CurDAG->getEntryNode();
    Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));

    SDValue OffsetSlot =
        CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
    Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
                          Align(8));

    Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);

    SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
    SDValue IntID =
        CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
    SDValue Ops[] = {Chain,
                     IntID,
                     Passthru,
                     StackSlot,
                     CurDAG->getRegister(RISCV::X0, MVT::i64),
                     VL};

    SDValue Result = CurDAG->getMemIntrinsicNode(
        ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
        MachineMemOperand::MOLoad);

    // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
    // vlse we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}

void RISCVDAGToDAGISel::PostprocessISelDAG() {
  HandleSDNode Dummy(CurDAG->getRoot());
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    MadeChange |= doPeepholeSExtW(N);
    MadeChange |= doPeepholeLoadStoreADDI(N);
    MadeChange |= doPeepholeMaskedRVV(N);
  }

  CurDAG->setRoot(Dummy.getValue());

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}

// Returns true if N is a MachineSDNode that has a reg and simm12 memory
// operand. The indices of the base pointer and offset are returned in BaseOpIdx
// and OffsetOpIdx.
static bool hasMemOffset(SDNode *N, unsigned &BaseOpIdx,
                         unsigned &OffsetOpIdx) {
  switch (N->getMachineOpcode()) {
  case RISCV::LB:
  case RISCV::LH:
  case RISCV::LW:
  case RISCV::LBU:
  case RISCV::LHU:
  case RISCV::LWU:
  case RISCV::LD:
  case RISCV::FLH:
  case RISCV::FLW:
  case RISCV::FLD:
    BaseOpIdx = 0;
    OffsetOpIdx = 1;
    return true;
  case RISCV::SB:
  case RISCV::SH:
  case RISCV::SW:
  case RISCV::SD:
  case RISCV::FSH:
  case RISCV::FSW:
  case RISCV::FSD:
    BaseOpIdx = 1;
    OffsetOpIdx = 2;
    return true;
  }

  return false;
}
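
// Illustrative note (not in the original source): for a load such as
// LW rd, 8(rs1), the selected machine node's operand 0 is the base (rs1) and
// operand 1 is the simm12 offset (8); for a store such as SW rs2, 8(rs1), the
// stored value comes first, so the base and offset sit at indices 1 and 2.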

static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                         int64_t Imm, const RISCVSubtarget &Subtarget) {
  MVT XLenVT = Subtarget.getXLenVT();
  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());

  SDNode *Result = nullptr;
  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
  for (RISCVMatInt::Inst &Inst : Seq) {
    SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SDImm);
      break;
    case RISCVMatInt::RegX0:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg,
                                      CurDAG->getRegister(RISCV::X0, XLenVT));
      break;
    case RISCVMatInt::RegReg:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SrcReg);
      break;
    case RISCVMatInt::RegImm:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
      break;
    }

    // Only the first instruction has X0 as its source.
    SrcReg = SDValue(Result, 0);
  }

  return Result;
}
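
// Illustrative example (not in the original source): for Imm = 0x12345,
// RISCVMatInt typically yields the two-instruction sequence
//   LUI  x, 0x12       ; x = 0x12000
//   ADDI x, x, 0x345   ; x = 0x12345
// which the loop above emits as machine nodes chained through SrcReg.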
222 
224  unsigned NF, RISCVII::VLMUL LMUL) {
225  static const unsigned M1TupleRegClassIDs[] = {
226  RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
227  RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
228  RISCV::VRN8M1RegClassID};
229  static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
230  RISCV::VRN3M2RegClassID,
231  RISCV::VRN4M2RegClassID};
232 
233  assert(Regs.size() >= 2 && Regs.size() <= 8);
234 
235  unsigned RegClassID;
236  unsigned SubReg0;
237  switch (LMUL) {
238  default:
239  llvm_unreachable("Invalid LMUL.");
244  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
245  "Unexpected subreg numbering");
246  SubReg0 = RISCV::sub_vrm1_0;
247  RegClassID = M1TupleRegClassIDs[NF - 2];
248  break;
250  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
251  "Unexpected subreg numbering");
252  SubReg0 = RISCV::sub_vrm2_0;
253  RegClassID = M2TupleRegClassIDs[NF - 2];
254  break;
256  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
257  "Unexpected subreg numbering");
258  SubReg0 = RISCV::sub_vrm4_0;
259  RegClassID = RISCV::VRN2M4RegClassID;
260  break;
261  }
262 
263  SDLoc DL(Regs[0]);
265 
266  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
267 
268  for (unsigned I = 0; I < Regs.size(); ++I) {
269  Ops.push_back(Regs[I]);
270  Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
271  }
272  SDNode *N =
273  CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
274  return SDValue(N, 0);
275 }
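
// Illustrative example (not in the original source): for NF = 2 at LMUL_1,
// createTuple builds roughly
//   REG_SEQUENCE VRN2M1, Regs[0], sub_vrm1_0, Regs[1], sub_vrm1_1
// producing one Untyped value that the segment load/store pseudos consume.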

void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  SDValue Base;
  SelectBaseAddr(Node->getOperand(CurOp++), Base);
  Operands.push_back(Base); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  Operands.push_back(SEWOp);

  // Masked load has the tail policy argument.
  if (IsMasked && IsLoad) {
    // Policy must be a constant.
    uint64_t Policy = Node->getConstantOperandVal(CurOp++);
    SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
    Operands.push_back(PolicyOp);
  }

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}
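
// Illustrative summary (not in the original source) of the operand order the
// helper above produces:
//   { base, [stride/index,] [V0 mask,] VL, SEW (log2), [policy,] chain[, glue] }
// matching the operand layout the RVV load/store pseudoinstructions expect.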

static bool isAllUndef(ArrayRef<SDValue> Values) {
  return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
}

void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(Merge);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands, /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
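
// Illustrative example (not in the original source): a riscv_vlseg2 intrinsic
// with i32 elements at LMUL=1 maps to a PseudoVLSEG2E32_V_M1-style pseudo; the
// loop above then peels the Untyped tuple result apart with sub_vrm1_0 and
// sub_vrm1_1 extracts to replace the intrinsic's two vector results.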

void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands,
                             /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               XLenVT, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/true, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 4;
  if (IsStrided)
    NF--;
  if (IsMasked)
    NF--;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 5;
  if (IsMasked)
    --NF;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/false, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
  if (!Subtarget->hasVInstructions())
    return;

  assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
         "Unexpected opcode");

  SDLoc DL(Node);
  MVT XLenVT = Subtarget->getXLenVT();

  bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
  unsigned IntNoOffset = HasChain ? 1 : 0;
  unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);

  assert((IntNo == Intrinsic::riscv_vsetvli ||
          IntNo == Intrinsic::riscv_vsetvlimax ||
          IntNo == Intrinsic::riscv_vsetvli_opt ||
          IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
         "Unexpected vsetvli intrinsic");

  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
               IntNo == Intrinsic::riscv_vsetvlimax_opt;
  unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);

  assert(Node->getNumOperands() == Offset + 2 &&
         "Unexpected number of operands");

  unsigned SEW =
      RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
  RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
      Node->getConstantOperandVal(Offset + 1) & 0x7);

  unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                            /*MaskAgnostic*/ false);
  SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);

  SmallVector<EVT, 2> VTs = {XLenVT};
  if (HasChain)
    VTs.push_back(MVT::Other);

  SDValue VLOperand;
  unsigned Opcode = RISCV::PseudoVSETVLI;
  if (VLMax) {
    VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
    Opcode = RISCV::PseudoVSETVLIX0;
  } else {
    VLOperand = Node->getOperand(IntNoOffset + 1);

    if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
      uint64_t AVL = C->getZExtValue();
      if (isUInt<5>(AVL)) {
        SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
        SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
        if (HasChain)
          Ops.push_back(Node->getOperand(0));
        ReplaceNode(
            Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
        return;
      }
    }
  }

  SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
  if (HasChain)
    Ops.push_back(Node->getOperand(0));

  ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
}
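
// Illustrative example (not in the original source): a riscv_vsetvli intrinsic
// with constant AVL 8 and SEW/LMUL operands encoding e32/m1 fits the uimm5
// path above, so it becomes PseudoVSETIVLI 8, vtype(e32, m1, ta, mu); the
// vsetvlimax variants instead select PseudoVSETVLIX0 with X0 as the AVL.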

void RISCVDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we have already selected.
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  // Instruction Selection not handled by the auto-generated tablegen selection
  // should be handled here.
  unsigned Opcode = Node->getOpcode();
  MVT XLenVT = Subtarget->getXLenVT();
  SDLoc DL(Node);
  MVT VT = Node->getSimpleValueType(0);

  switch (Opcode) {
  case ISD::Constant: {
    auto *ConstNode = cast<ConstantSDNode>(Node);
    if (VT == XLenVT && ConstNode->isZero()) {
      SDValue New =
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
      ReplaceNode(Node, New.getNode());
      return;
    }
    int64_t Imm = ConstNode->getSExtValue();
    // If the upper XLen-16 bits are not used, try to convert this to a simm12
    // by sign extending bit 15.
    if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
        hasAllHUsers(Node))
      Imm = SignExtend64<16>(Imm);
    // If the upper 32 bits are not used, try to convert this into a simm32 by
    // sign extending bit 31.
    if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
      Imm = SignExtend64<32>(Imm);

    ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
    return;
  }
  case ISD::ADD: {
    // Try to select ADD + immediate used as memory addresses to
    // (ADDI (ADD X, Imm-Lo12), Lo12) if it will allow the ADDI to be removed by
    // doPeepholeLoadStoreADDI.
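    // Illustrative example (not in the original source): for
    // (add X, 0x12345678), Lo12 = 0x678, so this emits
    //   (ADDI (ADD X, materialized 0x12345000), 0x678)
    // and a memory user can later fold the trailing ADDI into its offset.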

    // RHS should be an immediate.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;

    int64_t Offset = N1C->getSExtValue();
    int64_t Lo12 = SignExtend64<12>(Offset);

    // Don't do this if the lower 12 bits are 0 or we could use ADDI directly.
    if (Lo12 == 0 || isInt<12>(Offset))
      break;

    // Don't do this if we can use a pair of ADDIs.
    if (isInt<12>(Offset / 2) && isInt<12>(Offset - Offset / 2))
      break;

    bool AllPointerUses = true;
    for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
      SDNode *User = *UI;

      // Is this user a memory instruction that uses a register and immediate
      // that has this ADD as its pointer?
      unsigned BaseOpIdx, OffsetOpIdx;
      if (!User->isMachineOpcode() ||
          !hasMemOffset(User, BaseOpIdx, OffsetOpIdx) ||
          UI.getOperandNo() != BaseOpIdx) {
        AllPointerUses = false;
        break;
      }

      // If the memory instruction already has an offset, make sure the combined
      // offset is foldable.
      int64_t MemOffs =
          cast<ConstantSDNode>(User->getOperand(OffsetOpIdx))->getSExtValue();
      MemOffs += Lo12;
      if (!isInt<12>(MemOffs)) {
        AllPointerUses = false;
        break;
      }
    }

    if (!AllPointerUses)
      break;

    Offset -= Lo12;
    // Restore sign bits for RV32.
    if (!Subtarget->is64Bit())
      Offset = SignExtend64<32>(Offset);

    // Emit (ADDI (ADD X, Hi), Lo).
    SDNode *Imm = selectImm(CurDAG, DL, VT, Offset, *Subtarget);
    SDNode *ADD = CurDAG->getMachineNode(RISCV::ADD, DL, VT,
                                         Node->getOperand(0), SDValue(Imm, 0));
    SDNode *ADDI =
        CurDAG->getMachineNode(RISCV::ADDI, DL, VT, SDValue(ADD, 0),
                               CurDAG->getTargetConstant(Lo12, DL, VT));
    ReplaceNode(Node, ADDI);
    return;
  }
  case ISD::SRL: {
    // Optimize (srl (and X, C2), C) ->
    //          (srli (slli X, (XLen-C3)), (XLen-C3) + C)
    // Where C2 is a mask with C3 trailing ones.
    // Taking into account that the C2 may have had lower bits unset by
    // SimplifyDemandedBits. This avoids materializing the C2 immediate.
    // This pattern occurs when type legalizing right shifts for types with
    // less than XLen bits.
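    // Worked example (not in the original source), with XLen=64:
    //   (srl (and X, 0xffff), 4)  ; C2 = 0xffff has C3 = 16 trailing ones
    // becomes
    //   (srli (slli X, 48), 52)   ; 48 = 64-16, 52 = 48+4
    // computing (X & 0xffff) >> 4 without materializing the 0xffff mask.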
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
        !isa<ConstantSDNode>(N0.getOperand(1)))
      break;
    unsigned ShAmt = N1C->getZExtValue();
    uint64_t Mask = N0.getConstantOperandVal(1);
    Mask |= maskTrailingOnes<uint64_t>(ShAmt);
    if (!isMask_64(Mask))
      break;
    unsigned TrailingOnes = countTrailingOnes(Mask);
    // 32 trailing ones should use srliw via tablegen pattern.
    if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
      break;
    unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
                               CurDAG->getTargetConstant(LShAmt, DL, VT));
    SDNode *SRLI = CurDAG->getMachineNode(
        RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
        CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
    ReplaceNode(Node, SRLI);
    return;
  }
  case ISD::SRA: {
    // Optimize (sra (sext_inreg X, i16), C) ->
    //          (srai (slli X, (XLen-16)), (XLen-16) + C)
    // And (sra (sext_inreg X, i8), C) ->
    //     (srai (slli X, (XLen-8)), (XLen-8) + C)
    // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
    // This transform matches the code we get without Zbb. The shifts are more
    // compressible, and this can help expose CSE opportunities in the sdiv by
    // constant optimization.
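    // Worked example (not in the original source), with XLen=64:
    //   (sra (sext_inreg X, i16), 3) -> (srai (slli X, 48), 51)
    // i.e. move the 16-bit value to the top, then arithmetic-shift it back
    // down by 48+3 bits, folding the sign extension into the shift pair.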
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
      break;
    unsigned ShAmt = N1C->getZExtValue();
    unsigned ExtSize =
        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
    // ExtSize of 32 should use sraiw via tablegen pattern.
    if (ExtSize >= 32 || ShAmt >= ExtSize)
      break;
    unsigned LShAmt = Subtarget->getXLen() - ExtSize;
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
                               CurDAG->getTargetConstant(LShAmt, DL, VT));
    SDNode *SRAI = CurDAG->getMachineNode(
        RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
        CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
    ReplaceNode(Node, SRAI);
    return;
  }
  case ISD::AND: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;

    SDValue N0 = Node->getOperand(0);

    bool LeftShift = N0.getOpcode() == ISD::SHL;
    if (!LeftShift && N0.getOpcode() != ISD::SRL)
      break;

    auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!C)
      break;
    uint64_t C2 = C->getZExtValue();
    unsigned XLen = Subtarget->getXLen();
    if (!C2 || C2 >= XLen)
      break;

    uint64_t C1 = N1C->getZExtValue();

    // Keep track of whether this is a c.andi. If we can't use c.andi, the
    // shift pair might offer more compression opportunities.
    // TODO: We could check for C extension here, but we don't have many lit
    // tests with the C extension enabled so not checking gets better coverage.
    // TODO: What if ANDI is faster than a shift?
    bool IsCANDI = isInt<6>(N1C->getSExtValue());

    // Clear irrelevant bits in the mask.
    if (LeftShift)
      C1 &= maskTrailingZeros<uint64_t>(C2);
    else
      C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

    // Some transforms should only be done if the shift has a single use or
    // the AND would become (srli (slli X, 32), 32).
    bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);

    SDValue X = N0.getOperand(0);

    // Turn (and (srl x, c2), c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
    // with c3 leading zeros.
    if (!LeftShift && isMask_64(C1)) {
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
      if (C2 < C3) {
        // If the number of leading zeros is C2+32 this can be SRLIW.
        if (C2 + 32 == C3) {
          SDNode *SRLIW =
              CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
        // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
        //
        // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
        // legalized and goes through DAG combine.
        if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
            X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
          SDNode *SRAIW =
              CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, X.getOperand(0),
                                     CurDAG->getTargetConstant(31, DL, XLenVT));
          SDNode *SRLIW = CurDAG->getMachineNode(
              RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
              CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (srli (slli x, c3-c2), c3).
        // Skip if we could use (zext.w (sraiw X, C2)).
        bool Skip = Subtarget->hasStdExtZba() && C3 == 32 &&
                    X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
        // Also skip if we can use bexti.
        Skip |= Subtarget->hasStdExtZbs() && C3 == XLen - 1;
        if (OneUseOrZExtW && !Skip) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
    // shifted by c2 bits with c3 leading zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));

      if (C2 + C3 < XLen &&
          C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
        // Use slli.uw when possible.
        if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
          SDNode *SLLI_UW =
              CurDAG->getMachineNode(RISCV::SLLI_UW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SLLI_UW);
          return;
        }

        // (srli (slli x, c2+c3), c3)
        if (OneUseOrZExtW && !IsCANDI) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
    // shifted mask with c2 leading zeros and c3 trailing zeros.
    if (!LeftShift && isShiftedMask_64(C1)) {
      uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
      uint64_t C3 = countTrailingZeros(C1);
      if (Leading == C2 && C2 + C3 < XLen && OneUseOrZExtW && !IsCANDI) {
        unsigned SrliOpc = RISCV::SRLI;
        // If the input is zexti32 we should use SRLIW.
        if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
            X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
          SrliOpc = RISCV::SRLIW;
          X = X.getOperand(0);
        }
        SDNode *SRLI = CurDAG->getMachineNode(
            SrliOpc, DL, XLenVT, X,
            CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
      if (Leading > 32 && (Leading - 32) == C2 && C2 + C3 < 32 &&
          OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, XLenVT, X,
            CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
    // shifted mask with no leading zeros and c3 trailing zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
      uint64_t C3 = countTrailingZeros(C1);
      if (Leading == 0 && C2 < C3 && OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLI = CurDAG->getMachineNode(
            RISCV::SRLI, DL, XLenVT, X,
            CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
      if (C2 < C3 && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, XLenVT, X,
            CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    break;
  }
  case ISD::MUL: {
    // Special case for calculating (mul (and X, C2), C1) where the full product
    // fits in XLen bits. We can shift X left by the number of leading zeros in
    // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
    // product has XLen trailing zeros, putting it in the output of MULHU. This
    // can avoid materializing a constant in a register for C2.
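    // Illustrative example (not in the original source), with XLen=64 and no
    // Zba: (mul (and X, 0xFFFFFFFF), 5) has 32 leading zeros in C2, so it
    // becomes (mulhu (slli X, 32), 5 << 32); the full product lands in the
    // upper 64 bits and MULHU returns exactly (X & 0xFFFFFFFF) * 5.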

    // RHS should be a constant.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C || !N1C->hasOneUse())
      break;

    // LHS should be an AND with constant.
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
      break;

    uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();

    // Constant should be a mask.
    if (!isMask_64(C2))
      break;

    // This should be the only use of the AND unless we will use
    // (SRLI (SLLI X, 32), 32). We don't use a shift pair for other AND
    // constants.
    if (!N0.hasOneUse() && C2 != UINT64_C(0xFFFFFFFF))
      break;

    // If this can be an ANDI, ZEXT.H or ZEXT.W we don't need to do this
    // optimization.
    if (isInt<12>(C2) ||
        (C2 == UINT64_C(0xFFFF) &&
         (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
        (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba()))
      break;

    // We need to shift left the AND input and C1 by a total of XLen bits.

    // How far left do we need to shift the AND input?
    unsigned XLen = Subtarget->getXLen();
    unsigned LeadingZeros = XLen - (64 - countLeadingZeros(C2));

    // The constant gets shifted by the remaining amount unless that would
    // shift bits out.
    uint64_t C1 = N1C->getZExtValue();
    unsigned ConstantShift = XLen - LeadingZeros;
    if (ConstantShift > (XLen - (64 - countLeadingZeros(C1))))
      break;

    uint64_t ShiftedC1 = C1 << ConstantShift;
    // If this is RV32, we need to sign extend the constant.
    if (XLen == 32)
      ShiftedC1 = SignExtend64<32>(ShiftedC1);

    // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
    SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
                               CurDAG->getTargetConstant(LeadingZeros, DL, VT));
    SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
                                           SDValue(SLLI, 0), SDValue(Imm, 0));
    ReplaceNode(Node, MULHU);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Node->getConstantOperandVal(0);
    switch (IntNo) {
    // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
            break;
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
    break;
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(3), VL);

      // If vmsgeu with 0 immediate, expand it to vmset.
      if (IsCmpUnsignedZero) {
        ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
        return;
      }

      // Expand to
      // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
                                               {Cmp, Cmp, VL, SEW}));
      return;
    }
    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
            break;
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
          VMOROpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
    break;
        CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
        CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
        CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
        CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
        CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
        CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
        CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_OPCODES
      }
      // Mask operations use the LMUL from the mask type.
      switch (RISCVTargetLowering::getLMUL(VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
    break;
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(5), VL);
      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);

      // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
      if (IsCmpUnsignedZero) {
        // We don't need vmor if the MaskedOff and the Mask are the same
        // value.
        if (Mask == MaskedOff) {
          ReplaceUses(Node, Mask.getNode());
          return;
        }
        ReplaceNode(Node,
                    CurDAG->getMachineNode(VMOROpcode, DL, VT,
                                           {Mask, MaskedOff, VL, MaskSEW}));
        return;
      }

      // If the MaskedOff value and the Mask are the same value use
      // vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
      // This avoids needing to copy v0 to vd before starting the next sequence.
      if (Mask == MaskedOff) {
        SDValue Cmp = SDValue(
            CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
            0);
        ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
                                                 {Mask, Cmp, VL, MaskSEW}));
        return;
      }

      // Mask needs to be copied to V0.
      SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
                                           RISCV::V0, Mask, SDValue());
      SDValue Glue = Chain.getValue(1);
      SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);

      // Otherwise use
      // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
      // The result is mask undisturbed.
      // We use the same instructions to emulate mask agnostic behavior, because
      // the agnostic result can be either undisturbed or all 1.
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
                                 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
          0);
      // vmxor.mm vd, vd, v0 is used to update the active values.
      ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
                                               {Cmp, Mask, VL, MaskSEW}));
      return;
    }
    case Intrinsic::riscv_vsetvli_opt:
    case Intrinsic::riscv_vsetvlimax_opt:
      return selectVSETVLI(Node);
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax:
      return selectVSETVLI(Node);
    case Intrinsic::riscv_vlseg2:
    case Intrinsic::riscv_vlseg3:
    case Intrinsic::riscv_vlseg4:
    case Intrinsic::riscv_vlseg5:
    case Intrinsic::riscv_vlseg6:
    case Intrinsic::riscv_vlseg7:
    case Intrinsic::riscv_vlseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg2_mask:
    case Intrinsic::riscv_vlseg3_mask:
    case Intrinsic::riscv_vlseg4_mask:
    case Intrinsic::riscv_vlseg5_mask:
    case Intrinsic::riscv_vlseg6_mask:
    case Intrinsic::riscv_vlseg7_mask:
    case Intrinsic::riscv_vlseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlsseg2:
    case Intrinsic::riscv_vlsseg3:
    case Intrinsic::riscv_vlsseg4:
    case Intrinsic::riscv_vlsseg5:
    case Intrinsic::riscv_vlsseg6:
    case Intrinsic::riscv_vlsseg7:
    case Intrinsic::riscv_vlsseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vlsseg2_mask:
    case Intrinsic::riscv_vlsseg3_mask:
    case Intrinsic::riscv_vlsseg4_mask:
    case Intrinsic::riscv_vlsseg5_mask:
    case Intrinsic::riscv_vlsseg6_mask:
    case Intrinsic::riscv_vlsseg7_mask:
    case Intrinsic::riscv_vlsseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vloxseg2:
    case Intrinsic::riscv_vloxseg3:
    case Intrinsic::riscv_vloxseg4:
    case Intrinsic::riscv_vloxseg5:
    case Intrinsic::riscv_vloxseg6:
    case Intrinsic::riscv_vloxseg7:
    case Intrinsic::riscv_vloxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2:
    case Intrinsic::riscv_vluxseg3:
    case Intrinsic::riscv_vluxseg4:
    case Intrinsic::riscv_vluxseg5:
    case Intrinsic::riscv_vluxseg6:
    case Intrinsic::riscv_vluxseg7:
    case Intrinsic::riscv_vluxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vloxseg2_mask:
    case Intrinsic::riscv_vloxseg3_mask:
    case Intrinsic::riscv_vloxseg4_mask:
    case Intrinsic::riscv_vloxseg5_mask:
    case Intrinsic::riscv_vloxseg6_mask:
    case Intrinsic::riscv_vloxseg7_mask:
    case Intrinsic::riscv_vloxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2_mask:
    case Intrinsic::riscv_vluxseg3_mask:
    case Intrinsic::riscv_vluxseg4_mask:
    case Intrinsic::riscv_vluxseg5_mask:
    case Intrinsic::riscv_vluxseg6_mask:
    case Intrinsic::riscv_vluxseg7_mask:
    case Intrinsic::riscv_vluxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vlseg8ff:
    case Intrinsic::riscv_vlseg7ff:
    case Intrinsic::riscv_vlseg6ff:
    case Intrinsic::riscv_vlseg5ff:
    case Intrinsic::riscv_vlseg4ff:
    case Intrinsic::riscv_vlseg3ff:
    case Intrinsic::riscv_vlseg2ff: {
      selectVLSEGFF(Node, /*IsMasked*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg8ff_mask:
    case Intrinsic::riscv_vlseg7ff_mask:
    case Intrinsic::riscv_vlseg6ff_mask:
    case Intrinsic::riscv_vlseg5ff_mask:
    case Intrinsic::riscv_vlseg4ff_mask:
    case Intrinsic::riscv_vlseg3ff_mask:
    case Intrinsic::riscv_vlseg2ff_mask: {
      selectVLSEGFF(Node, /*IsMasked*/ true);
      return;
    }
    case Intrinsic::riscv_vloxei:
    case Intrinsic::riscv_vloxei_mask:
    case Intrinsic::riscv_vluxei:
    case Intrinsic::riscv_vluxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                      IntNo == Intrinsic::riscv_vluxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                       IntNo == Intrinsic::riscv_vloxei_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else
        // Skip the undef passthru operand for nomask TA version pseudo.
        CurOp++;

      MVT IndexVT;
      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ true, Operands,
                                 /*IsLoad=*/true, &IndexVT);

      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
      unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
        report_fatal_error("The V extension does not support EEW=64 for index "
                           "values when XLEN=32");
      }
      const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
          IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
          static_cast<unsigned>(IndexLMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vlm:
    case Intrinsic::riscv_vle:
    case Intrinsic::riscv_vle_mask:
    case Intrinsic::riscv_vlse:
    case Intrinsic::riscv_vlse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                      IntNo == Intrinsic::riscv_vlse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // The riscv_vlm intrinsic is always tail agnostic and has no passthru
      // operand.
      bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU =
          HasPassthruOperand &&
          ((!IsMasked && !Node->getOperand(CurOp).isUndef()) || IsMasked);
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else if (HasPassthruOperand)
        // Skip the undef passthru operand for nomask TA version pseudo.
        CurOp++;

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                 Operands, /*IsLoad=*/true);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                              static_cast<unsigned>(LMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vleff:
    case Intrinsic::riscv_vleff_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else
        // Skip the undef passthru operand for nomask TA version pseudo.
        CurOp++;

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ false, Operands,
                                 /*IsLoad=*/true);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                              Log2SEW, static_cast<unsigned>(LMUL));
      MachineSDNode *Load = CurDAG->getMachineNode(
          P->Pseudo, DL, Node->getValueType(0), XLenVT, MVT::Other, Operands);
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    case Intrinsic::riscv_vsseg2:
    case Intrinsic::riscv_vsseg3:
    case Intrinsic::riscv_vsseg4:
    case Intrinsic::riscv_vsseg5:
    case Intrinsic::riscv_vsseg6:
    case Intrinsic::riscv_vsseg7:
    case Intrinsic::riscv_vsseg8: {
      selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vsseg2_mask:
    case Intrinsic::riscv_vsseg3_mask:
    case Intrinsic::riscv_vsseg4_mask:
    case Intrinsic::riscv_vsseg5_mask:
    case Intrinsic::riscv_vsseg6_mask:
    case Intrinsic::riscv_vsseg7_mask:
    case Intrinsic::riscv_vsseg8_mask: {
      selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vssseg2:
    case Intrinsic::riscv_vssseg3:
    case Intrinsic::riscv_vssseg4:
    case Intrinsic::riscv_vssseg5:
    case Intrinsic::riscv_vssseg6:
    case Intrinsic::riscv_vssseg7:
    case Intrinsic::riscv_vssseg8: {
      selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vssseg2_mask:
    case Intrinsic::riscv_vssseg3_mask:
    case Intrinsic::riscv_vssseg4_mask:
    case Intrinsic::riscv_vssseg5_mask:
    case Intrinsic::riscv_vssseg6_mask:
    case Intrinsic::riscv_vssseg7_mask:
    case Intrinsic::riscv_vssseg8_mask: {
      selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vsoxseg2:
    case Intrinsic::riscv_vsoxseg3:
    case Intrinsic::riscv_vsoxseg4:
    case Intrinsic::riscv_vsoxseg5:
    case Intrinsic::riscv_vsoxseg6:
    case Intrinsic::riscv_vsoxseg7:
    case Intrinsic::riscv_vsoxseg8:
      selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vsuxseg2:
    case Intrinsic::riscv_vsuxseg3:
    case Intrinsic::riscv_vsuxseg4:
    case Intrinsic::riscv_vsuxseg5:
    case Intrinsic::riscv_vsuxseg6:
    case Intrinsic::riscv_vsuxseg7:
    case Intrinsic::riscv_vsuxseg8:
      selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vsoxseg2_mask:
    case Intrinsic::riscv_vsoxseg3_mask:
    case Intrinsic::riscv_vsoxseg4_mask:
    case Intrinsic::riscv_vsoxseg5_mask:
    case Intrinsic::riscv_vsoxseg6_mask:
    case Intrinsic::riscv_vsoxseg7_mask:
    case Intrinsic::riscv_vsoxseg8_mask:
      selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vsuxseg2_mask:
    case Intrinsic::riscv_vsuxseg3_mask:
    case Intrinsic::riscv_vsuxseg4_mask:
    case Intrinsic::riscv_vsuxseg5_mask:
    case Intrinsic::riscv_vsuxseg6_mask:
    case Intrinsic::riscv_vsuxseg7_mask:
    case Intrinsic::riscv_vsuxseg8_mask:
      selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vsoxei:
    case Intrinsic::riscv_vsoxei_mask:
    case Intrinsic::riscv_vsuxei:
    case Intrinsic::riscv_vsuxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                      IntNo == Intrinsic::riscv_vsuxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                       IntNo == Intrinsic::riscv_vsoxei_mask;

      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      Operands.push_back(Node->getOperand(CurOp++)); // Store value.

      MVT IndexVT;
      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ true, Operands,
                                 /*IsLoad=*/false, &IndexVT);

      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
      unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
        report_fatal_error("The V extension does not support EEW=64 for index "
                           "values when XLEN=32");
      }
      const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
          IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
          static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
      MachineSDNode *Store =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

      ReplaceNode(Node, Store);
      return;
    }
    case Intrinsic::riscv_vsm:
    case Intrinsic::riscv_vse:
    case Intrinsic::riscv_vse_mask:
    case Intrinsic::riscv_vsse:
    case Intrinsic::riscv_vsse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                      IntNo == Intrinsic::riscv_vsse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;

      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      Operands.push_back(Node->getOperand(CurOp++)); // Store value.

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                 Operands);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
          IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
      MachineSDNode *Store =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

      ReplaceNode(Node, Store);
      return;
    }
    }
    break;
  }
  case ISD::BITCAST: {
    MVT SrcVT = Node->getOperand(0).getSimpleValueType();
    // Just drop bitcasts between vectors if both are fixed or both are
    // scalable.
    if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
        (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
      ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
      CurDAG->RemoveDeadNode(Node);
      return;
    }
    break;
  }
1617  case ISD::INSERT_SUBVECTOR: {
1618  SDValue V = Node->getOperand(0);
1619  SDValue SubV = Node->getOperand(1);
1620  SDLoc DL(SubV);
1621  auto Idx = Node->getConstantOperandVal(2);
1622  MVT SubVecVT = SubV.getSimpleValueType();
1623 
1624  const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1625  MVT SubVecContainerVT = SubVecVT;
1626  // Establish the correct scalable-vector types for any fixed-length type.
1627  if (SubVecVT.isFixedLengthVector())
1628  SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1629  if (VT.isFixedLengthVector())
1630  VT = TLI.getContainerForFixedLengthVector(VT);
1631 
1632  const auto *TRI = Subtarget->getRegisterInfo();
1633  unsigned SubRegIdx;
1634  std::tie(SubRegIdx, Idx) =
1636  VT, SubVecContainerVT, Idx, TRI);
1637 
1638  // If the Idx hasn't been completely eliminated then this is a subvector
1639  // insert which doesn't naturally align to a vector register. These must
1640  // be handled using instructions to manipulate the vector registers.
1641  if (Idx != 0)
1642  break;
1643 
1644  RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1645  bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1646  SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1647  SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1648  (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1649  assert((!IsSubVecPartReg || V.isUndef()) &&
1650  "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1651  "the subvector is smaller than a full-sized register");
1652 
1653  // If we haven't set a SubRegIdx, then we must be going between
1654  // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1655  if (SubRegIdx == RISCV::NoSubRegister) {
1656  unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1657  assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1658  InRegClassID &&
1659  "Unexpected subvector extraction");
1660  SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1661  SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1662  DL, VT, SubV, RC);
1663  ReplaceNode(Node, NewNode);
1664  return;
1665  }
1666 
1667  SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1668  ReplaceNode(Node, Insert.getNode());
1669  return;
1670  }
1671  case ISD::EXTRACT_SUBVECTOR: {
1672  SDValue V = Node->getOperand(0);
1673  auto Idx = Node->getConstantOperandVal(1);
1674  MVT InVT = V.getSimpleValueType();
1675  SDLoc DL(V);
1676 
1677  const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1678  MVT SubVecContainerVT = VT;
1679  // Establish the correct scalable-vector types for any fixed-length type.
1680  if (VT.isFixedLengthVector())
1681  SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1682  if (InVT.isFixedLengthVector())
1683  InVT = TLI.getContainerForFixedLengthVector(InVT);
1684 
1685  const auto *TRI = Subtarget->getRegisterInfo();
1686  unsigned SubRegIdx;
1687  std::tie(SubRegIdx, Idx) =
1688  RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1689  InVT, SubVecContainerVT, Idx, TRI);
1690 
1691  // If the Idx hasn't been completely eliminated then this is a subvector
1692  // extract which doesn't naturally align to a vector register. These must
1693  // be handled using instructions to manipulate the vector registers.
1694  if (Idx != 0)
1695  break;
1696 
1697  // If we haven't set a SubRegIdx, then we must be going between
1698  // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1699  if (SubRegIdx == RISCV::NoSubRegister) {
1700  unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1701  assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1702  InRegClassID &&
1703  "Unexpected subvector extraction");
1704  SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1705  SDNode *NewNode =
1706  CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1707  ReplaceNode(Node, NewNode);
1708  return;
1709  }
1710 
1711  SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1712  ReplaceNode(Node, Extract.getNode());
1713  return;
1714  }
1715  case ISD::SPLAT_VECTOR:
1716  case RISCVISD::VMV_S_X_VL:
1717  case RISCVISD::VFMV_S_F_VL:
1718  case RISCVISD::VMV_V_X_VL:
1719  case RISCVISD::VFMV_V_F_VL: {
1720  // Try to match splat of a scalar load to a strided load with stride of x0.
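  // For example, (splat (load i32 %p)) can become a stride-zero PseudoVLSE32
  // ("vlse32.v vd, (p), zero"), broadcasting the value straight from memory
  // instead of going through a scalar register first.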
1721  bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
1722  Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
1723  bool HasPassthruOperand = Node->getOpcode() != ISD::SPLAT_VECTOR;
1724  if (HasPassthruOperand && !Node->getOperand(0).isUndef())
1725  break;
1726  SDValue Src = HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
1727  auto *Ld = dyn_cast<LoadSDNode>(Src);
1728  if (!Ld)
1729  break;
1730  EVT MemVT = Ld->getMemoryVT();
1731  // The memory VT should be the same size as the element type.
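  // (For example, a splat of an i8 extending load into an i32-element vector
  // cannot be folded, since the strided load would read the wrong width.)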
1732  if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1733  break;
1734  if (!IsProfitableToFold(Src, Node, Node) ||
1735  !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1736  break;
1737 
1738  SDValue VL;
1739  if (Node->getOpcode() == ISD::SPLAT_VECTOR)
1740  VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1741  else if (IsScalarMove) {
1742  // We could handle more VL values if we updated the VSETVLI insertion
1743  // pass to avoid introducing more VSETVLIs.
1744  if (!isOneConstant(Node->getOperand(2)))
1745  break;
1746  selectVLOp(Node->getOperand(2), VL);
1747  } else
1748  selectVLOp(Node->getOperand(2), VL);
1749 
1750  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1751  SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1752 
1753  SDValue Operands[] = {Ld->getBasePtr(),
1754  CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1755  Ld->getChain()};
1756 
1757  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1758  const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1759  /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
1760  Log2SEW, static_cast<unsigned>(LMUL));
1761  MachineSDNode *Load =
1762  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1763 
1764  CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
1765 
1766  ReplaceNode(Node, Load);
1767  return;
1768  }
1769  }
1770 
1771  // Select the default instruction.
1772  SelectCode(Node);
1773 }
1774 
1775 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1776  const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1777  switch (ConstraintID) {
1778  case InlineAsm::Constraint_m:
1779  // We just support simple memory operands that have a single address
1780  // operand and need no special handling.
1781  OutOps.push_back(Op);
1782  return false;
1783  case InlineAsm::Constraint_A:
1784  OutOps.push_back(Op);
1785  return false;
1786  default:
1787  break;
1788  }
1789 
1790  return true;
1791 }
1792 
1793 bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
1794  SDValue &Offset) {
1795  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1796  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1797  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Subtarget->getXLenVT());
1798  return true;
1799  }
1800 
1801  return false;
1802 }
1803 
1804 // Select a frame index and an optional immediate offset from an ADD or OR.
1805 bool RISCVDAGToDAGISel::SelectFrameAddrRegImm(SDValue Addr, SDValue &Base,
1806  SDValue &Offset) {
1807  if (SelectAddrFrameIndex(Addr, Base, Offset))
1808  return true;
1809 
1810  if (!CurDAG->isBaseWithConstantOffset(Addr))
1811  return false;
1812 
1813  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
1814  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1815  if (isInt<12>(CVal)) {
1816  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
1817  Subtarget->getXLenVT());
1818  Offset = CurDAG->getTargetConstant(CVal, SDLoc(Addr),
1819  Subtarget->getXLenVT());
1820  return true;
1821  }
1822  }
1823 
1824  return false;
1825 }
1826 
1827 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1828  // If this is a FrameIndex, select it directly. Otherwise just let it get
1829  // selected to a register independently.
1830  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1831  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1832  else
1833  Base = Addr;
1834  return true;
1835 }
1836 
1837 bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
1838  SDValue &Offset) {
1839  if (SelectAddrFrameIndex(Addr, Base, Offset))
1840  return true;
1841 
1842  if (CurDAG->isBaseWithConstantOffset(Addr)) {
1843  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1844  if (isInt<12>(CVal)) {
1845  Base = Addr.getOperand(0);
1846  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
1847  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
1848  Subtarget->getXLenVT());
1849  Offset = CurDAG->getTargetConstant(CVal, SDLoc(Addr),
1850  Subtarget->getXLenVT());
1851  return true;
1852  }
1853  }
1854 
1855  Base = Addr;
1856  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Subtarget->getXLenVT());
1857  return true;
1858 }
1859 
1861  SDValue &ShAmt) {
1862  // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1863  // amount. If there is an AND on the shift amount, we can bypass it if it
1864  // doesn't affect any of those bits.
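  // For example, with ShiftWidth == 64, (srl x, (and y, 63)) reads the same
  // shift amounts as (srl x, y), so the AND can be bypassed; any wider mask
  // of which 63 is a subset works the same way.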
1865  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1866  const APInt &AndMask = N->getConstantOperandAPInt(1);
1867 
1868  // Since the max shift amount is a power of 2 we can subtract 1 to make a
1869  // mask that covers the bits needed to represent all shift amounts.
1870  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1871  APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1872 
1873  if (ShMask.isSubsetOf(AndMask)) {
1874  ShAmt = N.getOperand(0);
1875  return true;
1876  }
1877 
1878  // SimplifyDemandedBits may have optimized the mask so try restoring any
1879  // bits that are known zero.
1880  KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1881  if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1882  ShAmt = N.getOperand(0);
1883  return true;
1884  }
1885  } else if (N.getOpcode() == ISD::SUB &&
1886  isa<ConstantSDNode>(N.getOperand(0))) {
1887  uint64_t Imm = N.getConstantOperandVal(0);
1888  // If we are shifting by N-X where N == 0 mod ShiftWidth, then just shift by -X to
1889  // generate a NEG instead of a SUB of a constant.
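  // For example, with ShiftWidth == 64, (srl x, (sub 64, y)) depends only on
  // the low 6 bits of 64 - y, which equal the low 6 bits of 0 - y, so
  // shifting by (neg y) is equivalent.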
1890  if (Imm != 0 && Imm % ShiftWidth == 0) {
1891  SDLoc DL(N);
1892  EVT VT = N.getValueType();
1893  SDValue Zero =
1894  CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, VT);
1895  unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
1896  MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
1897  N.getOperand(1));
1898  ShAmt = SDValue(Neg, 0);
1899  return true;
1900  }
1901  }
1902 
1903  ShAmt = N;
1904  return true;
1905 }
1906 
1907 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1908  if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1909  cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1910  Val = N.getOperand(0);
1911  return true;
1912  }
1913  MVT VT = N.getSimpleValueType();
1914  if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1915  Val = N;
1916  return true;
1917  }
1918 
1919  return false;
1920 }
1921 
1922 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1923  if (N.getOpcode() == ISD::AND) {
1924  auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1925  if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1926  Val = N.getOperand(0);
1927  return true;
1928  }
1929  }
1930  MVT VT = N.getSimpleValueType();
1931  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
1932  if (CurDAG->MaskedValueIsZero(N, Mask)) {
1933  Val = N;
1934  return true;
1935  }
1936 
1937  return false;
1938 }
1939 
1940 // Return true if all users of this SDNode* only consume the lower \p Bits.
1941 // This can be used to form W instructions for add/sub/mul/shl even when the
1942 // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
1943 // SimplifyDemandedBits has made it so some users see a sext_inreg and some
1944 // don't. The sext_inreg+add/sub/mul/shl will be selected to a W instruction,
1945 // but the plain add/sub/mul/shl would stay non-W. By checking the users we
1946 // may be able to use a W instruction and CSE with the other instruction if
1947 // this has happened. We could try to detect that the CSE opportunity exists
1948 // before doing this, but that would be more complicated.
1949 // TODO: Does this need to look through AND/OR/XOR to their users to find more
1950 // opportunities?
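// For example, if one user of (add x, y) is a sext_inreg that selects to
// ADDW and another user is an SW store of the value, both consume only the
// low 32 bits, so the add itself can also be selected as ADDW and CSE with
// the ADDW produced for the sext_inreg path.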
1951 bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
1952  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
1953  Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
1954  Node->getOpcode() == ISD::SRL ||
1955  Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
1956  Node->getOpcode() == RISCVISD::GREV ||
1957  Node->getOpcode() == RISCVISD::GORC ||
1958  isa<ConstantSDNode>(Node)) &&
1959  "Unexpected opcode");
1960 
1961  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
1962  SDNode *User = *UI;
1963  // Users of this node should have already been instruction selected.
1964  if (!User->isMachineOpcode())
1965  return false;
1966 
1967  // TODO: Add more opcodes?
1968  switch (User->getMachineOpcode()) {
1969  default:
1970  return false;
1971  case RISCV::ADDW:
1972  case RISCV::ADDIW:
1973  case RISCV::SUBW:
1974  case RISCV::MULW:
1975  case RISCV::SLLW:
1976  case RISCV::SLLIW:
1977  case RISCV::SRAW:
1978  case RISCV::SRAIW:
1979  case RISCV::SRLW:
1980  case RISCV::SRLIW:
1981  case RISCV::DIVW:
1982  case RISCV::DIVUW:
1983  case RISCV::REMW:
1984  case RISCV::REMUW:
1985  case RISCV::ROLW:
1986  case RISCV::RORW:
1987  case RISCV::RORIW:
1988  case RISCV::CLZW:
1989  case RISCV::CTZW:
1990  case RISCV::CPOPW:
1991  case RISCV::SLLI_UW:
1992  case RISCV::FMV_W_X:
1993  case RISCV::FCVT_H_W:
1994  case RISCV::FCVT_H_WU:
1995  case RISCV::FCVT_S_W:
1996  case RISCV::FCVT_S_WU:
1997  case RISCV::FCVT_D_W:
1998  case RISCV::FCVT_D_WU:
1999  if (Bits < 32)
2000  return false;
2001  break;
2002  case RISCV::SLLI:
2003  // SLLI only uses the lower (XLen - ShAmt) bits.
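  // e.g. on RV64, a user doing SLLI by 40 only consumes the low 24 bits of
  // its operand.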
2004  if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
2005  return false;
2006  break;
2007  case RISCV::ANDI:
2008  if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1))))
2009  return false;
2010  break;
2011  case RISCV::SEXT_B:
2012  if (Bits < 8)
2013  return false;
2014  break;
2015  case RISCV::SEXT_H:
2016  case RISCV::FMV_H_X:
2017  case RISCV::ZEXT_H_RV32:
2018  case RISCV::ZEXT_H_RV64:
2019  if (Bits < 16)
2020  return false;
2021  break;
2022  case RISCV::ADD_UW:
2023  case RISCV::SH1ADD_UW:
2024  case RISCV::SH2ADD_UW:
2025  case RISCV::SH3ADD_UW:
2026  // The first operand to add.uw/shXadd.uw is implicitly zero extended from
2027  // 32 bits.
2028  if (UI.getOperandNo() != 0 || Bits < 32)
2029  return false;
2030  break;
2031  case RISCV::SB:
2032  if (UI.getOperandNo() != 0 || Bits < 8)
2033  return false;
2034  break;
2035  case RISCV::SH:
2036  if (UI.getOperandNo() != 0 || Bits < 16)
2037  return false;
2038  break;
2039  case RISCV::SW:
2040  if (UI.getOperandNo() != 0 || Bits < 32)
2041  return false;
2042  break;
2043  }
2044  }
2045 
2046  return true;
2047 }
2048 
2049 // Select VL as a 5-bit immediate or a value that will become a register. This
2050 // allows us to choose between VSETIVLI or VSETVLI later.
2051 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
2052  auto *C = dyn_cast<ConstantSDNode>(N);
2053  if (C && isUInt<5>(C->getZExtValue())) {
2054  VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
2055  N->getValueType(0));
2056  } else if (C && C->isAllOnesValue()) {
2057  // Treat all ones as VLMax.
2058  VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
2059  N->getValueType(0));
2060  } else if (isa<RegisterSDNode>(N) &&
2061  cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
2062  // All our VL operands use an operand that allows GPRNoX0 or an immediate
2063  // as the register class. Convert X0 to a special immediate to pass the
2064  // MachineVerifier. This is recognized specially by the vsetvli insertion
2065  // pass.
2066  VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
2067  N->getValueType(0));
2068  } else {
2069  VL = N;
2070  }
2071 
2072  return true;
2073 }
2074 
2075 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
2076  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
2077  return false;
2078  SplatVal = N.getOperand(1);
2079  return true;
2080 }
2081 
2082 using ValidateFn = bool (*)(int64_t);
2083 
2084 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
2085  SelectionDAG &DAG,
2086  const RISCVSubtarget &Subtarget,
2087  ValidateFn ValidateImm) {
2088  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2089  !isa<ConstantSDNode>(N.getOperand(1)))
2090  return false;
2091 
2092  int64_t SplatImm =
2093  cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2094 
2095  // The semantics of RISCVISD::VMV_V_X_VL is that when the operand
2096  // type is wider than the resulting vector element type: an implicit
2097  // truncation first takes place. Therefore, perform a manual
2098  // truncation/sign-extension in order to ignore any truncated bits and catch
2099  // any zero-extended immediate.
2100  // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
2101  // sign-extending to (XLenVT -1).
2102  MVT XLenVT = Subtarget.getXLenVT();
2103  assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
2104  "Unexpected splat operand type");
2105  MVT EltVT = N.getSimpleValueType().getVectorElementType();
2106  if (EltVT.bitsLT(XLenVT))
2107  SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
2108 
2109  if (!ValidateImm(SplatImm))
2110  return false;
2111 
2112  SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
2113  return true;
2114 }
2115 
2116 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
2117  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
2118  [](int64_t Imm) { return isInt<5>(Imm); });
2119 }
2120 
2121 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
2122  return selectVSplatSimmHelper(
2123  N, SplatVal, *CurDAG, *Subtarget,
2124  [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
2125 }
2126 
2127 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
2128  SDValue &SplatVal) {
2129  return selectVSplatSimmHelper(
2130  N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
2131  return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
2132  });
2133 }
2134 
2135 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
2136  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2137  !isa<ConstantSDNode>(N.getOperand(1)))
2138  return false;
2139 
2140  int64_t SplatImm =
2141  cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2142 
2143  if (!isUInt<5>(SplatImm))
2144  return false;
2145 
2146  SplatVal =
2147  CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
2148 
2149  return true;
2150 }
2151 
2152 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
2153  SDValue &Imm) {
2154  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
2155  int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
2156 
2157  if (!isInt<5>(ImmVal))
2158  return false;
2159 
2160  Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
2161  return true;
2162  }
2163 
2164  return false;
2165 }
2166 
2167 // Merge an ADDI into the offset of a load/store instruction where possible.
2168 // (load (addi base, off1), off2) -> (load base, off1+off2)
2169 // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
2170 // (load (add base, (addi src, off1)), off2)
2171 // -> (load (add base, src), off1+off2)
2172 // (store val, (add base, (addi src, off1)), off2)
2173 // -> (store val, (add base, src), off1+off2)
2174 // This is possible when off1+off2 fits in a signed 12-bit immediate.
2175 bool RISCVDAGToDAGISel::doPeepholeLoadStoreADDI(SDNode *N) {
2176  unsigned OffsetOpIdx, BaseOpIdx;
2177  if (!hasMemOffset(N, BaseOpIdx, OffsetOpIdx))
2178  return false;
2179 
2180  if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
2181  return false;
2182 
2183  SDValue Base = N->getOperand(BaseOpIdx);
2184 
2185  if (!Base.isMachineOpcode())
2186  return false;
2187 
2188  if (Base.getMachineOpcode() == RISCV::ADDI) {
2189  // If the base is an ADDI, we can merge it in to the load/store.
2190  } else if (Base.getMachineOpcode() == RISCV::ADDIW &&
2191  isa<ConstantSDNode>(Base.getOperand(1)) &&
2192  Base.getOperand(0).isMachineOpcode() &&
2193  Base.getOperand(0).getMachineOpcode() == RISCV::LUI &&
2194  isa<ConstantSDNode>(Base.getOperand(0).getOperand(0))) {
2195  // ADDIW can be merged if it's part of LUI+ADDIW constant materialization
2196  // and LUI+ADDI would have produced the same result. This is true for all
2197  // simm32 values except 0x7ffff800-0x7fffffff.
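  // For example, LUI 0x12345 + ADDIW 0x678 and LUI 0x12345 + ADDI 0x678 both
  // materialize 0x12345678, so folding is safe there; for 0x7ffff800 the
  // computed Offset is -0x80000800, which fails the isInt<32> check below.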
2198  int64_t Offset =
2199  SignExtend64<32>(Base.getOperand(0).getConstantOperandVal(0) << 12);
2200  Offset += cast<ConstantSDNode>(Base.getOperand(1))->getSExtValue();
2201  if (!isInt<32>(Offset))
2202  return false;
2203  } else
2204  return false;
2205 
2206  SDValue ImmOperand = Base.getOperand(1);
2207  uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);
2208 
2209  if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
2210  int64_t Offset1 = Const->getSExtValue();
2211  int64_t CombinedOffset = Offset1 + Offset2;
2212  if (!isInt<12>(CombinedOffset))
2213  return false;
2214  ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
2215  ImmOperand.getValueType());
2216  } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
2217  // If the off1 in (addi base, off1) is a global variable's address (its
2218  // low part, really), then we can rely on the alignment of that variable
2219  // to provide a margin of safety before off1 can overflow the 12 bits.
2220  // Check if off2 falls within that margin; if so off1+off2 can't overflow.
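  // For example, for an 8-byte-aligned global at offset 0 the %lo addend is
  // a multiple of 8, so adding any off2 < 8 cannot carry out of the 12-bit
  // range.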
2221  const DataLayout &DL = CurDAG->getDataLayout();
2222  Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
2223  if (Offset2 != 0 && Alignment <= Offset2)
2224  return false;
2225  int64_t Offset1 = GA->getOffset();
2226  int64_t CombinedOffset = Offset1 + Offset2;
2227  ImmOperand = CurDAG->getTargetGlobalAddress(
2228  GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
2229  CombinedOffset, GA->getTargetFlags());
2230  } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
2231  // Ditto.
2232  Align Alignment = CP->getAlign();
2233  if (Offset2 != 0 && Alignment <= Offset2)
2234  return false;
2235  int64_t Offset1 = CP->getOffset();
2236  int64_t CombinedOffset = Offset1 + Offset2;
2237  ImmOperand = CurDAG->getTargetConstantPool(
2238  CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
2239  CombinedOffset, CP->getTargetFlags());
2240  } else {
2241  return false;
2242  }
2243 
2244  LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
2245  LLVM_DEBUG(Base->dump(CurDAG));
2246  LLVM_DEBUG(dbgs() << "\nN: ");
2247  LLVM_DEBUG(N->dump(CurDAG));
2248  LLVM_DEBUG(dbgs() << "\n");
2249 
2250  // Modify the offset operand of the load/store.
2251  if (BaseOpIdx == 0) { // Load
2252  N = CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
2253  N->getOperand(2));
2254  } else { // Store
2255  N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
2256  ImmOperand, N->getOperand(3));
2257  }
2258 
2259  return true;
2260 }
2261 
2262 // Try to remove sext.w if the input is a W instruction or can be made into
2263 // a W instruction cheaply.
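// For example, "add a0, a1, a2; addiw a0, a0, 0" can become a single
// "addw a0, a1, a2"; if the input is already an ADDW, the redundant sext.w
// is simply deleted.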
2264 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
2265  // Look for the sext.w pattern, addiw rd, rs1, 0.
2266  if (N->getMachineOpcode() != RISCV::ADDIW ||
2267  !isNullConstant(N->getOperand(1)))
2268  return false;
2269 
2270  SDValue N0 = N->getOperand(0);
2271  if (!N0.isMachineOpcode())
2272  return false;
2273 
2274  switch (N0.getMachineOpcode()) {
2275  default:
2276  break;
2277  case RISCV::ADD:
2278  case RISCV::ADDI:
2279  case RISCV::SUB:
2280  case RISCV::MUL:
2281  case RISCV::SLLI: {
2282  // Convert sext.w+add/addi/sub/mul/slli to the corresponding W instruction.
2283  // This creates a new independent instruction, which improves latency.
2284  unsigned Opc;
2285  switch (N0.getMachineOpcode()) {
2286  default:
2287  llvm_unreachable("Unexpected opcode!");
2288  case RISCV::ADD: Opc = RISCV::ADDW; break;
2289  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
2290  case RISCV::SUB: Opc = RISCV::SUBW; break;
2291  case RISCV::MUL: Opc = RISCV::MULW; break;
2292  case RISCV::SLLI: Opc = RISCV::SLLIW; break;
2293  }
2294 
2295  SDValue N00 = N0.getOperand(0);
2296  SDValue N01 = N0.getOperand(1);
2297 
2298  // Shift amount needs to be uimm5.
2299  if (N0.getMachineOpcode() == RISCV::SLLI &&
2300  !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
2301  break;
2302 
2303  SDNode *Result =
2304  CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
2305  N00, N01);
2306  ReplaceUses(N, Result);
2307  return true;
2308  }
2309  case RISCV::ADDW:
2310  case RISCV::ADDIW:
2311  case RISCV::SUBW:
2312  case RISCV::MULW:
2313  case RISCV::SLLIW:
2314  case RISCV::GREVIW:
2315  case RISCV::GORCIW:
2316  // Result is already sign-extended; just remove the sext.w.
2317  // NOTE: We only handle the nodes that are selected with hasAllWUsers.
2318  ReplaceUses(N, N0.getNode());
2319  return true;
2320  }
2321 
2322  return false;
2323 }
2324 
2325 // Optimize masked RVV pseudo instructions with a known all-ones mask to their
2326 // corresponding "unmasked" pseudo versions. The mask we're interested in will
2327 // take the form of a V0 physical register operand, with a glued
2328 // register-setting instruction.
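// For example, a PseudoVADD_VV_M1_MASK whose V0 operand is defined by a
// PseudoVMSET_M_B* (an all-ones mask) computes the same result as the
// unmasked PseudoVADD_VV_M1, so the mask and policy operands can be dropped.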
2329 bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
2330  const RISCV::RISCVMaskedPseudoInfo *I =
2331  RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
2332  if (!I)
2333  return false;
2334 
2335  unsigned MaskOpIdx = I->MaskOpIdx;
2336 
2337  // Check that we're using V0 as a mask register.
2338  if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
2339  cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
2340  return false;
2341 
2342  // The glued user defines V0.
2343  const auto *Glued = N->getGluedNode();
2344 
2345  if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
2346  return false;
2347 
2348  // Check that we're defining V0 as a mask register.
2349  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
2350  cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
2351  return false;
2352 
2353  // Check the instruction defining V0; it needs to be a VMSET pseudo.
2354  SDValue MaskSetter = Glued->getOperand(2);
2355 
2356  const auto IsVMSet = [](unsigned Opc) {
2357  return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
2358  Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
2359  Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
2360  Opc == RISCV::PseudoVMSET_M_B8;
2361  };
2362 
2363  // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
2364  // undefined behaviour if it's the wrong bitwidth, so we could choose to
2365  // assume that it's all-ones? Same applies to its VL.
2366  if (!MaskSetter->isMachineOpcode() || !IsVMSet(MaskSetter.getMachineOpcode()))
2367  return false;
2368 
2369  // Retrieve the tail policy operand index, if any.
2370  Optional<unsigned> TailPolicyOpIdx;
2371  const RISCVInstrInfo &TII = *Subtarget->getInstrInfo();
2372  const MCInstrDesc &MaskedMCID = TII.get(N->getMachineOpcode());
2373 
2374  bool IsTA = true;
2375  if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
2376  // The last operand of the pseudo is the policy op, but we might have a
2377  // Glue operand last. We might also have a chain.
2378  TailPolicyOpIdx = N->getNumOperands() - 1;
2379  if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Glue)
2380  (*TailPolicyOpIdx)--;
2381  if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Other)
2382  (*TailPolicyOpIdx)--;
2383 
2384  if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
2385  RISCVII::TAIL_AGNOSTIC)) {
2386  // Keep the true-masked instruction when there is no unmasked TU
2387  // instruction
2388  if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
2389  return false;
2390  // We can't use TA if the tie-operand is not IMPLICIT_DEF
2391  if (!N->getOperand(0).isUndef())
2392  IsTA = false;
2393  }
2394  }
2395 
2396  unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;
2397 
2398  // Check that we're dropping the mask operand and any policy operand
2399  // when we transform to this unmasked pseudo. Additionally, if this instruction
2400  // is tail agnostic, the unmasked instruction should not have a merge op.
2401  uint64_t TSFlags = TII.get(Opc).TSFlags;
2402  assert((IsTA != RISCVII::hasMergeOp(TSFlags)) &&
2403  RISCVII::hasDummyMaskOp(TSFlags) &&
2404  !RISCVII::hasVecPolicyOp(TSFlags) &&
2405  "Unexpected pseudo to transform to");
2406  (void)TSFlags;
2407 
2408  SmallVector<SDValue, 8> Ops;
2409  // Skip the merge operand at index 0 if IsTA
2410  for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
2411  // Skip the mask, the policy, and the Glue.
2412  SDValue Op = N->getOperand(I);
2413  if (I == MaskOpIdx || I == TailPolicyOpIdx ||
2414  Op.getValueType() == MVT::Glue)
2415  continue;
2416  Ops.push_back(Op);
2417  }
2418 
2419  // Transitively apply any node glued to our new node.
2420  if (auto *TGlued = Glued->getGluedNode())
2421  Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));
2422 
2423  SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
2424  ReplaceUses(N, Result);
2425 
2426  return true;
2427 }
2428 
2429 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
2430 // for instruction scheduling.
2431 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,
2432  CodeGenOpt::Level OptLevel) {
2433  return new RISCVDAGToDAGISel(TM, OptLevel);
2434 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::RISCVII::LMUL_1
@ LMUL_1
Definition: RISCVBaseInfo.h:109
llvm::TargetMachine::getOptLevel
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Definition: TargetMachine.cpp:186
llvm::RISCVISD::VFMV_S_F_VL
@ VFMV_S_F_VL
Definition: RISCVISelLowering.h:162
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:28
llvm::MVT::getVectorElementType
MVT getVectorElementType() const
Definition: MachineValueType.h:528
B1
llvm::MVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: MachineValueType.h:1101
llvm::ISD::INTRINSIC_VOID
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:199
llvm::RISCVDAGToDAGISel::selectVLXSEG
void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
Definition: RISCVISelDAGToDAG.cpp:414
MathExtras.h
Merge
R600 Clause Merge
Definition: R600ClauseMergePass.cpp:70
llvm::SelectionDAGISel::TLI
const TargetLowering * TLI
Definition: SelectionDAGISel.h:54
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::RISCVISD::SLLW
@ SLLW
Definition: RISCVISelLowering.h:63
llvm::SelectionDAGISel::TM
TargetMachine & TM
Definition: SelectionDAGISel.h:42
llvm::RISCV::VLSEGPseudo
Definition: RISCVISelDAGToDAG.h:127
llvm::SDLoc
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Definition: SelectionDAGNodes.h:1090
llvm::MVT::isFixedLengthVector
bool isFixedLengthVector() const
Definition: MachineValueType.h:386
llvm::RISCVDAGToDAGISel::selectVSplatSimm5Plus1
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:2121
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::MVT::isInteger
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: MachineValueType.h:358
llvm::RISCVDAGToDAGISel::PreprocessISelDAG
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
Definition: RISCVISelDAGToDAG.cpp:45
llvm::ISD::BITCAST
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:886
Insert
Vector Rotate Left Mask Mask Insert
Definition: README_P9.txt:112
llvm::RISCVSubtarget::getTargetLowering
const RISCVTargetLowering * getTargetLowering() const override
Definition: RISCVSubtarget.h:133
llvm::SelectionDAG::getCopyToReg
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:750
llvm::RISCV::VLXSEGPseudo
Definition: RISCVISelDAGToDAG.h:138
llvm::SDValue::getNode
SDNode * getNode() const
get the SDNode which holds the desired result
Definition: SelectionDAGNodes.h:151
llvm::RISCVDAGToDAGISel::selectZExti32
bool selectZExti32(SDValue N, SDValue &Val)
Definition: RISCVISelDAGToDAG.cpp:1922
llvm::isOneConstant
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Definition: SelectionDAG.cpp:10523
llvm::SelectionDAG::allnodes_end
allnodes_const_iterator allnodes_end() const
Definition: SelectionDAG.h:509
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
llvm::RISCVISD::FMV_H_X
@ FMV_H_X
Definition: RISCVISelLowering.h:99
llvm::SDNode::isUndef
bool isUndef() const
Return true if the type of the node type undefined.
Definition: SelectionDAGNodes.h:655
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
llvm::KnownBits::Zero
APInt Zero
Definition: KnownBits.h:24
C1
instcombine should handle this C2 when C1
Definition: README.txt:263
llvm::RISCVISD::DIVUW
@ DIVUW
Definition: RISCVISelLowering.h:70
llvm::MVT::bitsLT
bool bitsLT(MVT VT) const
Return true if this has less bits than VT.
Definition: MachineValueType.h:1163
llvm::SelectionDAG::getFrameIndex
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
Definition: SelectionDAG.cpp:1679
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::RISCVDAGToDAGISel::selectVSETVLI
void selectVSETVLI(SDNode *Node)
Definition: RISCVISelDAGToDAG.cpp:542
llvm::SelectionDAG::getVTList
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
Definition: SelectionDAG.cpp:9108
llvm::MachineSDNode
An SDNode that represents everything that will be needed to construct a MachineInstr.
Definition: SelectionDAGNodes.h:2867
llvm::RISCVSubtarget::hasVInstructions
bool hasVInstructions() const
Definition: RISCVSubtarget.h:223
llvm::RISCVDAGToDAGISel::SelectFrameAddrRegImm
bool SelectFrameAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
Definition: RISCVISelDAGToDAG.cpp:1805
llvm::SelectionDAG::allnodes_begin
allnodes_const_iterator allnodes_begin() const
Definition: SelectionDAG.h:508
llvm::SelectionDAG::getRoot
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
Definition: SelectionDAG.h:528
llvm::HandleSDNode
This class is used to form a handle around another node that is persistent and is updated across invo...
Definition: SelectionDAGNodes.h:1217
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures)
Definition: RISCVMatInt.cpp:177
llvm::SDNode
Represents one node in the SelectionDAG.
Definition: SelectionDAGNodes.h:454
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::RISCVDAGToDAGISel::selectVSplat
bool selectVSplat(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:2075
llvm::RISCVII::LMUL_8
@ LMUL_8
Definition: RISCVBaseInfo.h:112
llvm::MVT::Glue
@ Glue
Definition: MachineValueType.h:270
llvm::MemOp
Definition: TargetLowering.h:111
llvm::RISCVDAGToDAGISel
Definition: RISCVISelDAGToDAG.h:23
llvm::SelectionDAG::getMemBasePlusOffset
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
Definition: SelectionDAG.cpp:6608
llvm::RISCVSubtarget::hasStdExtZbs
bool hasStdExtZbs() const
Definition: RISCVSubtarget.h:162
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1411
llvm::tgtok::Bits
@ Bits
Definition: TGLexer.h:50
llvm::SelectionDAG::getStore
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
Definition: SelectionDAG.cpp:7804
llvm::InlineAsm::Constraint_m
@ Constraint_m
Definition: InlineAsm.h:255
llvm::SelectionDAG::isBaseWithConstantOffset
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
Definition: SelectionDAG.cpp:4524
llvm::RISCV::VLX_VSXPseudo
Definition: RISCVISelDAGToDAG.h:186
llvm::Optional< unsigned >
llvm::SelectionDAG::RemoveDeadNodes
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
Definition: SelectionDAG.cpp:900
llvm::RISCVTargetLowering::getRegClassIDForVecVT
static unsigned getRegClassIDForVecVT(MVT VT)
Definition: RISCVISelLowering.cpp:1433
llvm::RISCV::VLMaxSentinel
static constexpr int64_t VLMaxSentinel
Definition: RISCVInstrInfo.h:199
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::RISCVDAGToDAGISel::selectVSSEG
void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided)
Definition: RISCVISelDAGToDAG.cpp:468
llvm::RISCVII::hasVecPolicyOp
static bool hasVecPolicyOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:159
RISCVMatInt.h
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1628
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:2127
llvm::RISCVDAGToDAGISel::SelectBaseAddr
bool SelectBaseAddr(SDValue Addr, SDValue &Base)
Definition: RISCVISelDAGToDAG.cpp:1827
KnownBits.h
llvm::MVT::isScalableVector
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
Definition: MachineValueType.h:381
llvm::SelectionDAG::getRegister
SDValue getRegister(unsigned Reg, EVT VT)
Definition: SelectionDAG.cpp:2061
llvm::RISCVDAGToDAGISel::SelectAddrFrameIndex
bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset)
Definition: RISCVISelDAGToDAG.cpp:1793
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::RISCVSubtarget::is64Bit
bool is64Bit() const
Definition: RISCVSubtarget.h:181
llvm::RISCV::VSSEGPseudo
Definition: RISCVISelDAGToDAG.h:149
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::RISCVII::LMUL_4
@ LMUL_4
Definition: RISCVBaseInfo.h:111
llvm::RISCV::RISCVMaskedPseudoInfo
Definition: RISCVISelDAGToDAG.h:196
llvm::SPII::Load
@ Load
Definition: SparcInstrInfo.h:32
llvm::all_of
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1617
llvm::EVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: ValueTypes.h:362
llvm::MCInstrDesc::TSFlags
uint64_t TSFlags
Definition: MCInstrDesc.h:205
llvm::RISCVDAGToDAGISel::selectShiftMask
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
Definition: RISCVISelDAGToDAG.cpp:1860
llvm::SelectionDAG::getTargetFrameIndex
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:703
llvm::SDValue::getValueType
EVT getValueType() const
Return the ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:1125
llvm::SelectionDAG
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:220
llvm::SelectionDAG::UpdateNodeOperands
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
Definition: SelectionDAG.cpp:9198
llvm::ISD::Constant
@ Constant
Definition: ISDOpcodes.h:76
getReg
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
Definition: MipsDisassembler.cpp:517
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:754
llvm::User
Definition: User.h:44
llvm::SelectionDAG::getUNDEF
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
Definition: SelectionDAG.h:971
llvm::ISD::CopyToReg
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition: ISDOpcodes.h:203
llvm::ISD::SIGN_EXTEND_INREG
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:781
createTuple
static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF, RISCVII::VLMUL LMUL)
Definition: RISCVISelDAGToDAG.cpp:223
llvm::SelectionDAG::getTargetLoweringInfo
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:458
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:34
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::isShiftedMask_64
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
Definition: MathExtras.h:485
llvm::TargetLowering
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Definition: TargetLowering.h:3412
llvm::MVT::getScalarSizeInBits
uint64_t getScalarSizeInBits() const
Definition: MachineValueType.h:1091
llvm::SelectionDAG::MaskedValueIsZero
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
Definition: SelectionDAG.cpp:2514
llvm::RISCVTargetLowering::getSubregIndexByMVT
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index)
Definition: RISCVISelLowering.cpp:1410
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::ISD::SRA
@ SRA
Definition: ISDOpcodes.h:692
llvm::RISCVSubtarget::getXLenVT
MVT getXLenVT() const
Definition: RISCVSubtarget.h:189
RISCVISelDAGToDAG.h
llvm::SelectionDAGISel::ReplaceNode
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
Definition: SelectionDAGISel.h:229
llvm::TypeSize::Fixed
static TypeSize Fixed(ScalarTy MinVal)
Definition: TypeSize.h:441
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:623
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:197
llvm::SelectionDAG::setRoot
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
Definition: SelectionDAG.h:537
llvm::RISCVVType::decodeVSEW
static unsigned decodeVSEW(unsigned VSEW)
Definition: RISCVBaseInfo.h:412
RISCVMCTargetDesc.h
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:143
llvm::APInt::getHighBitsSet
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:279
llvm::RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand
bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
Definition: RISCVISelDAGToDAG.cpp:1775
llvm::SelectionDAG::getMemIntrinsicNode
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
Definition: SelectionDAG.cpp:7529
llvm::RISCVSubtarget::hasStdExtZbb
bool hasStdExtZbb() const
Definition: RISCVSubtarget.h:155
llvm::ISD::AND
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:666
llvm::RISCVSubtarget::getInstrInfo
const RISCVInstrInfo * getInstrInfo() const override
Definition: RISCVSubtarget.h:129
CASE_VMSLT_OPCODES
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)
Align
uint64_t Align
Definition: ELFObjHandler.cpp:81
llvm::ISD::SPLAT_VECTOR
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:613
llvm::RISCVSubtarget::hasStdExtZbp
bool hasStdExtZbp() const
Definition: RISCVSubtarget.h:160
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::SPII::Store
@ Store
Definition: SparcInstrInfo.h:33
llvm::SDValue::getConstantOperandVal
uint64_t getConstantOperandVal(unsigned i) const
Definition: SelectionDAGNodes.h:1137
llvm::RISCVISD::DIVW
@ DIVW
Definition: RISCVISelLowering.h:69
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::SelectionDAG::getTargetGlobalAddress
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:698
llvm::RISCVISD::CLZW
@ CLZW
Definition: RISCVISelLowering.h:78
llvm::RISCVDAGToDAGISel::SelectAddrRegImm
bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
Definition: RISCVISelDAGToDAG.cpp:1837
Operands
mir Rename Register Operands
Definition: MIRNamerPass.cpp:74
llvm::APInt::isSubsetOf
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition: APInt.h:1207
llvm::createRISCVISelDag
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM, CodeGenOpt::Level OptLevel)
Definition: RISCVISelDAGToDAG.cpp:2431
llvm::RISCVISD::GREV
@ GREV
Definition: RISCVISelLowering.h:124
llvm::SPIRV::Decoration::Alignment
@ Alignment
llvm::RISCVISD::GORC
@ GORC
Definition: RISCVISelLowering.h:126
llvm::RISCVISD::VMV_S_X_VL
@ VMV_S_X_VL
Definition: RISCVISelLowering.h:160
llvm::SelectionDAG::RemoveDeadNode
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
Definition: SelectionDAG.cpp:954
llvm::RISCV::VSEPseudo
Definition: RISCVISelDAGToDAG.h:178
llvm::RISCVDAGToDAGISel::selectVLOp
bool selectVLOp(SDValue N, SDValue &VL)
Definition: RISCVISelDAGToDAG.cpp:2051
llvm::isInt< 32 >
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:373
llvm::RISCVDAGToDAGISel::selectVSXSEG
void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
Definition: RISCVISelDAGToDAG.cpp:500
llvm::SelectionDAGISel::IsProfitableToFold
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
Definition: SelectionDAGISel.cpp:2105
llvm::isUInt< 16 >
constexpr bool isUInt< 16 >(uint64_t x)
Definition: MathExtras.h:408
selectVSplatSimmHelper
static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, ValidateFn ValidateImm)
Definition: RISCVISelDAGToDAG.cpp:2084
uint64_t
llvm::RISCVDAGToDAGISel::selectVSplatUimm5
bool selectVSplatUimm5(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:2135
llvm::SelectionDAGISel::TII
const TargetInstrInfo * TII
Definition: SelectionDAGISel.h:53
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:78
llvm::SelectionDAGISel::FuncInfo
std::unique_ptr< FunctionLoweringInfo > FuncInfo
Definition: SelectionDAGISel.h:44
llvm::MachinePointerInfo
This class contains a discriminated union of information about pointers in memory operands,...
Definition: MachineMemOperand.h:39
hasMemOffset
static bool hasMemOffset(SDNode *N, unsigned &BaseOpIdx, unsigned &OffsetOpIdx)
Definition: RISCVISelDAGToDAG.cpp:159
llvm::SelectionDAG::getCopyFromReg
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:776
llvm::SelectionDAGISel::IsLegalToFold
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOpt::Level OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
Definition: SelectionDAGISel.cpp:2113
llvm::SDNode::getOperand
const SDValue & getOperand(unsigned Num) const
Definition: SelectionDAGNodes.h:908
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::SelectionDAG::getNode
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
Definition: SelectionDAG.cpp:8838
llvm::countTrailingOnes
unsigned countTrailingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the least significant bit to the first zero bit.
Definition: MathExtras.h:525
llvm::RISCVISD::ROLW
@ ROLW
Definition: RISCVISelLowering.h:74
llvm::RISCVMachineFunctionInfo
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
Definition: RISCVMachineFunctionInfo.h:47
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::isUInt< 32 >
constexpr bool isUInt< 32 >(uint64_t x)
Definition: MathExtras.h:411
llvm::SDValue::getValue
SDValue getValue(unsigned R) const
Definition: SelectionDAGNodes.h:171
llvm::RISCVDAGToDAGISel::selectVLSEG
void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided)
Definition: RISCVISelDAGToDAG.cpp:326
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MVT::Other
@ Other
Definition: MachineValueType.h:42
llvm::MVT::getSizeInBits
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
Definition: MachineValueType.h:883
llvm::RISCVMatInt::RegX0
@ RegX0
Definition: RISCVMatInt.h:25
llvm::SelectionDAGISel::CurDAG
SelectionDAG * CurDAG
Definition: SelectionDAGISel.h:48
llvm::RISCVDAGToDAGISel::hasAllWUsers
bool hasAllWUsers(SDNode *Node) const
Definition: RISCVISelDAGToDAG.h:66
llvm::SelectionDAG::getMachineNode
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
Definition: SelectionDAG.cpp:9546
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:31
llvm::RISCVISD::SRAW
@ SRAW
Definition: RISCVISelLowering.h:64
llvm::RISCVDAGToDAGISel::selectVSplatSimm5
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:2116
llvm::SelectionDAG::setNodeMemRefs
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
Definition: SelectionDAG.cpp:9314
llvm::RISCVSubtarget::hasStdExtZba
bool hasStdExtZba() const
Definition: RISCVSubtarget.h:154
llvm::MachinePointerInfo::getWithOffset
MachinePointerInfo getWithOffset(int64_t O) const
Definition: MachineMemOperand.h:79
llvm::SDNode::isMachineOpcode
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
Definition: SelectionDAGNodes.h:687
llvm::RISCVII::hasMergeOp
static bool hasMergeOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:147
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
llvm::MachineFunction
Definition: MachineFunction.h:257
llvm::RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
Definition: RISCVISelLowering.cpp:1445
llvm::RISCVISD::REMUW
@ REMUW
Definition: RISCVISelLowering.h:71
llvm::NVPTXISD::Dummy
@ Dummy
Definition: NVPTXISelLowering.h:60
llvm::SelectionDAG::getTargetConstantPool
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=None, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:714
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::RISCVInstrInfo
Definition: RISCVInstrInfo.h:44
llvm::MVT::i64
@ i64
Definition: MachineValueType.h:49
llvm::countTrailingZeros
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: MathExtras.h:156
llvm::SelectionDAG::getTargetInsertSubreg
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
Definition: SelectionDAG.cpp:9674
llvm::RISCVISD::VMV_V_X_VL
@ VMV_V_X_VL
Definition: RISCVISelLowering.h:151
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:130
llvm::SDValue::getMachineOpcode
unsigned getMachineOpcode() const
Definition: SelectionDAGNodes.h:1157
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::RISCVII::LMUL_2
@ LMUL_2
Definition: RISCVBaseInfo.h:110
llvm::SelectionDAG::ReplaceAllUsesOfValueWith
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
Definition: SelectionDAG.cpp:10134
llvm::X86ISD::FLD
@ FLD
This instruction implements an extending load to FP stack slots.
Definition: X86ISelLowering.h:836
llvm::SDValue::getOperand
const SDValue & getOperand(unsigned i) const
Definition: SelectionDAGNodes.h:1133
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::InlineAsm::Constraint_A
@ Constraint_A
Definition: InlineAsm.h:258
llvm::SDValue::hasOneUse
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
Definition: SelectionDAGNodes.h:1169
llvm::SDValue::getSimpleValueType
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:182
llvm::CodeGenOpt::Level
Level
Definition: CodeGen.h:52
llvm::SDVTList
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Definition: SelectionDAGNodes.h:78
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:811
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:134
llvm::ISD::INTRINSIC_WO_CHAIN
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:184
llvm::MVT::getVectorElementCount
ElementCount getVectorElementCount() const
Definition: MachineValueType.h:865
llvm::RISCVISD::RORW
@ RORW
Definition: RISCVISelLowering.h:75
llvm::ISD::INSERT_SUBVECTOR
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
Definition: ISDOpcodes.h:558
llvm::HexagonISD::CP
@ CP
Definition: HexagonISelLowering.h:53
llvm::SelectionDAGISel::MF
MachineFunction * MF
Definition: SelectionDAGISel.h:46
CASE_VMSLT_VMNAND_VMSET_OPCODES
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)
Alignment.h
selectImm
static SDNode * selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, int64_t Imm, const RISCVSubtarget &Subtarget)
Definition: RISCVISelDAGToDAG.cpp:190
llvm::SelectionDAG::computeKnownBits
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
Definition: SelectionDAG.cpp:2889
llvm::KnownBits
Definition: KnownBits.h:23
llvm::RISCVISD::SRLW
@ SRLW
Definition: RISCVISelLowering.h:65
llvm::ISD::EXTRACT_SUBVECTOR
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition: ISDOpcodes.h:572
llvm::isNullConstant
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
Definition: SelectionDAG.cpp:10508
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:345
llvm::RISCV::VSXSEGPseudo
Definition: RISCVISelDAGToDAG.h:158
RISCVISelLowering.h
llvm::RISCVDAGToDAGISel::PostprocessISelDAG
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
Definition: RISCVISelDAGToDAG.cpp:134
llvm::ilist_iterator
Iterator for intrusive lists based on ilist_node.
Definition: ilist_iterator.h:57
MachineFrameInfo.h
llvm::SelectionDAG::getEntryNode
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:531
llvm::RISCVVType::encodeVTYPE
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
Definition: RISCVBaseInfo.cpp:130
llvm::SelectionDAG::getDataLayout
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:452
llvm::RISCVDAGToDAGISel::selectVLSEGFF
void selectVLSEGFF(SDNode *Node, bool IsMasked)
Definition: RISCVISelDAGToDAG.cpp:369
llvm::SelectionDAG::getTargetExtractSubreg
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
Definition: SelectionDAG.cpp:9664
llvm::SelectionDAGISel::ReplaceUses
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
Definition: SelectionDAGISel.h:208
llvm::MVT::i32
@ i32
Definition: MachineValueType.h:48
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:190
llvm::SDValue
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
Definition: SelectionDAGNodes.h:137
llvm::RISCVTargetLowering
Definition: RISCVISelLowering.h:347
llvm::RISCVMatInt::RegReg
@ RegReg
Definition: RISCVMatInt.h:24
llvm::XCoreISD::LMUL
@ LMUL
Definition: XCoreISelLowering.h:59
llvm::countLeadingZeros
template <typename T> unsigned countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width)
Count the number of 0's from the most significant bit to the least, stopping at the first 1.
Definition: MathExtras.h:225
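A self-contained worked example of the 64-bit behavior:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>
    #include <cstdint>

    int main() {
      // Bit 4 is the highest set bit of 16, so 64 - 5 = 59 leading zeros.
      assert(llvm::countLeadingZeros<uint64_t>(16) == 59);
      // With the default ZB_Width, a zero input returns the type width.
      assert(llvm::countLeadingZeros<uint64_t>(0) == 64);
      return 0;
    }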
llvm::RISCVTargetLowering::getLMUL
static RISCVII::VLMUL getLMUL(MVT VT)
Definition: RISCVISelLowering.cpp:1366
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:436
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
llvm::RISCVISD::VFMV_V_F_VL
@ VFMV_V_F_VL
Definition: RISCVISelLowering.h:155
llvm::SDValue::isUndef
bool isUndef() const
Definition: SelectionDAGNodes.h:1161
llvm::codeview::ModifierOptions::Const
@ Const
llvm::RISCVMatInt::Imm
@ Imm
Definition: RISCVMatInt.h:23
llvm::RISCVII::LMUL_F8
@ LMUL_F8
Definition: RISCVBaseInfo.h:114
llvm::ISD::SHL
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:691
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1006
llvm::RISCVMatInt::RegImm
@ RegImm
Definition: RISCVMatInt.h:22
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:241
N
#define N
llvm::ISD::SRL
@ SRL
Definition: ISDOpcodes.h:693
RISCVMachineFunctionInfo.h
llvm::RISCVDAGToDAGISel::selectRVVSimm5
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
Definition: RISCVISelDAGToDAG.cpp:2152
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:164
llvm::RISCVII::LMUL_F4
@ LMUL_F4
Definition: RISCVBaseInfo.h:115
llvm::RISCVDAGToDAGISel::Select
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
Definition: RISCVISelDAGToDAG.cpp:612
llvm::RISCVII::VLMUL
VLMUL
Definition: RISCVBaseInfo.h:108
llvm::MVT::Untyped
@ Untyped
Definition: MachineValueType.h:274
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: APFloat.h:42
llvm::ISD::MULHU
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN containing the high bits of the result.
Definition: ISDOpcodes.h:637
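A scalar model may help make the semantics concrete; this is plain C++ for illustration, not DAG code. For i32 operands, MULHU yields the upper half of the full 64-bit product:

    #include <cassert>
    #include <cstdint>

    // Scalar model of i32 MULHU: the high 32 bits of the widened product.
    uint32_t mulhu32(uint32_t A, uint32_t B) {
      return static_cast<uint32_t>((static_cast<uint64_t>(A) * B) >> 32);
    }

    int main() {
      assert(mulhu32(0x80000000u, 2) == 1); // full product is 0x100000000
      return 0;
    }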
llvm::SDValue::getOpcode
unsigned getOpcode() const
Definition: SelectionDAGNodes.h:1121
llvm::SelectionDAG::getTargetConstant
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:652
TM
LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:308
llvm::ISD::INTRINSIC_W_CHAIN
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
Definition: ISDOpcodes.h:192
llvm::SelectionDAG::DeleteNode
void DeleteNode(SDNode *N)
Remove the specified node from the system.
Definition: SelectionDAG.cpp:965
CASE_VMXOR_VMANDN_VMOR_OPCODES
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)
llvm::SelectionDAG::getMachineFunction
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:449
llvm::SelectionDAG::ComputeNumSignBits
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
Definition: SelectionDAG.cpp:3864
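As a hedged sketch (assuming CurDAG and a 64-bit SDValue Val): if more than 32 sign bits are replicated, Val is already sign-extended from i32, which is the property RISC-V W-form patterns look for.

    // Hypothetical check: is Val sign-extended from 32 bits?
    if (CurDAG->ComputeNumSignBits(Val) > 32) {
      // The upper 33 bits all equal bit 31, so a W-form instruction
      // (e.g. ADDW) preserves the value.
    }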
llvm::RISCVII::hasDummyMaskOp
static bool hasDummyMaskOp(uint64_t TSFlags)
Definition: RISCVBaseInfo.h:139
llvm::MCInstrInfo::get
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
llvm::isMask_64
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit with the remainder zero (64 bit version).
Definition: MathExtras.h:473
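A minimal self-contained example (worked values):

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    int main() {
      assert(llvm::isMask_64(0xFF));   // 0b11111111: ones from bit 0 up.
      assert(!llvm::isMask_64(0xF0));  // Ones do not start at bit 0.
      assert(!llvm::isMask_64(0));     // The sequence must be non-empty.
      return 0;
    }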
llvm::RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL
@ SPLAT_VECTOR_SPLIT_I64_VL
Definition: RISCVISelLowering.h:166
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::M1
unsigned M1(unsigned Val)
Definition: VE.h:370
raw_ostream.h
llvm::SDValue::isMachineOpcode
bool isMachineOpcode() const
Definition: SelectionDAGNodes.h:1153
llvm::RISCV::VLEPseudo
Definition: RISCVISelDAGToDAG.h:168
llvm::RISCVDAGToDAGISel::hasAllHUsers
bool hasAllHUsers(SDNode *Node) const
Definition: RISCVISelDAGToDAG.h:65
ValidateFn
using ValidateFn = bool (*)(int64_t)
Definition: RISCVISelDAGToDAG.cpp:2082
llvm::RISCVISD::CTZW
@ CTZW
Definition: RISCVISelLowering.h:79
llvm::RISCVDAGToDAGISel::hasAllNBitUsers
bool hasAllNBitUsers(SDNode *Node, unsigned Bits) const
Definition: RISCVISelDAGToDAG.cpp:1951
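A hedged sketch of how this predicate is typically consulted during selection (inside RISCVDAGToDAGISel, with Node being the SDNode under selection):

    // Hypothetical guard: if every user reads only the low 32 bits,
    // the node behaves like an i32 value and a W-form selection is safe.
    if (hasAllNBitUsers(Node, 32)) {
      // e.g. prefer ADDW/SLLIW-style pseudos here.
    }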
Debug.h
llvm::RISCVDAGToDAGISel::selectSExti32
bool selectSExti32(SDValue N, SDValue &Val)
Definition: RISCVISelDAGToDAG.cpp:1907
llvm::TargetLoweringBase::getPointerTy
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
Definition: TargetLowering.h:354
llvm::ISD::TokenFactor
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
llvm::RISCVII::LMUL_F2
@ LMUL_F2
Definition: RISCVBaseInfo.h:116
llvm::sampleprof::Base
@ Base
Definition: Discriminator.h:58
llvm::RISCVII::TAIL_AGNOSTIC
@ TAIL_AGNOSTIC
Definition: RISCVBaseInfo.h:120
llvm::RISCVDAGToDAGISel::addVectorLoadStoreOperands
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, bool IsLoad=false, MVT *IndexVT=nullptr)
Definition: RISCVISelDAGToDAG.cpp:277
isAllUndef
static bool isAllUndef(ArrayRef< SDValue > Values)
Definition: RISCVISelDAGToDAG.cpp:322