// Source: LLVM 14.0.0git — RISCVISelDAGToDAG.cpp
// (extracted from the doxygen rendering; see llvm.org for the full file)
//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISCV target.
//
//===----------------------------------------------------------------------===//
12 
#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
25 
26 using namespace llvm;
27 
28 #define DEBUG_TYPE "riscv-isel"
29 
30 namespace llvm {
31 namespace RISCV {
32 #define GET_RISCVVSSEGTable_IMPL
33 #define GET_RISCVVLSEGTable_IMPL
34 #define GET_RISCVVLXSEGTable_IMPL
35 #define GET_RISCVVSXSEGTable_IMPL
36 #define GET_RISCVVLETable_IMPL
37 #define GET_RISCVVSETable_IMPL
38 #define GET_RISCVVLXTable_IMPL
39 #define GET_RISCVVSXTable_IMPL
40 #include "RISCVGenSearchableTables.inc"
41 } // namespace RISCV
42 } // namespace llvm
43 
46  E = CurDAG->allnodes_end();
47  I != E;) {
48  SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
49 
50  // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
51  // load. Done after lowering and combining so that we have a chance to
52  // optimize this to VMV_V_X_VL when the upper bits aren't needed.
53  if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
54  continue;
55 
56  assert(N->getNumOperands() == 3 && "Unexpected number of operands");
57  MVT VT = N->getSimpleValueType(0);
58  SDValue Lo = N->getOperand(0);
59  SDValue Hi = N->getOperand(1);
60  SDValue VL = N->getOperand(2);
62  Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
63  "Unexpected VTs!");
66  SDLoc DL(N);
67 
68  // We use the same frame index we use for moving two i32s into 64-bit FPR.
69  // This is an analogous operation.
70  int FI = FuncInfo->getMoveF64FrameIndex(MF);
73  SDValue StackSlot =
75 
76  SDValue Chain = CurDAG->getEntryNode();
77  Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
78 
79  SDValue OffsetSlot =
81  Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
82  Align(8));
83 
85 
86  SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
87  SDValue IntID =
88  CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
89  SDValue Ops[] = {Chain, IntID, StackSlot,
90  CurDAG->getRegister(RISCV::X0, MVT::i64), VL};
91 
93  ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
95 
96  // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
97  // vlse we created. This will cause general havok on the dag because
98  // anything below the conversion could be folded into other existing nodes.
99  // To avoid invalidating 'I', back it up to the convert node.
100  --I;
102 
103  // Now that we did that, the node is dead. Increment the iterator to the
104  // next node to process, then delete N.
105  ++I;
106  CurDAG->DeleteNode(N);
107  }
108 }
109 
112 
113  bool MadeChange = false;
114  while (Position != CurDAG->allnodes_begin()) {
115  SDNode *N = &*--Position;
116  // Skip dead nodes and any non-machine opcodes.
117  if (N->use_empty() || !N->isMachineOpcode())
118  continue;
119 
120  MadeChange |= doPeepholeSExtW(N);
121  MadeChange |= doPeepholeLoadStoreADDI(N);
122  }
123 
124  if (MadeChange)
126 }
127 
128 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
129  const RISCVSubtarget &Subtarget) {
130  MVT XLenVT = Subtarget.getXLenVT();
132  RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
133 
134  SDNode *Result = nullptr;
135  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
136  for (RISCVMatInt::Inst &Inst : Seq) {
137  SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
138  if (Inst.Opc == RISCV::LUI)
139  Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
140  else if (Inst.Opc == RISCV::ADDUW)
141  Result = CurDAG->getMachineNode(RISCV::ADDUW, DL, XLenVT, SrcReg,
142  CurDAG->getRegister(RISCV::X0, XLenVT));
143  else
144  Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
145 
146  // Only the first instruction has X0 as its source.
147  SrcReg = SDValue(Result, 0);
148  }
149 
150  return Result;
151 }
152 
154  unsigned RegClassID, unsigned SubReg0) {
155  assert(Regs.size() >= 2 && Regs.size() <= 8);
156 
157  SDLoc DL(Regs[0]);
159 
160  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
161 
162  for (unsigned I = 0; I < Regs.size(); ++I) {
163  Ops.push_back(Regs[I]);
164  Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
165  }
166  SDNode *N =
167  CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
168  return SDValue(N, 0);
169 }
170 
172  unsigned NF) {
173  static const unsigned RegClassIDs[] = {
174  RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
175  RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
176  RISCV::VRN8M1RegClassID};
177 
178  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
179 }
180 
182  unsigned NF) {
183  static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
184  RISCV::VRN3M2RegClassID,
185  RISCV::VRN4M2RegClassID};
186 
187  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
188 }
189 
191  unsigned NF) {
192  return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
193  RISCV::sub_vrm4_0);
194 }
195 
197  unsigned NF, RISCVII::VLMUL LMUL) {
198  switch (LMUL) {
199  default:
200  llvm_unreachable("Invalid LMUL.");
205  return createM1Tuple(CurDAG, Regs, NF);
207  return createM2Tuple(CurDAG, Regs, NF);
209  return createM4Tuple(CurDAG, Regs, NF);
210  }
211 }
212 
214  SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
215  bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
216  MVT *IndexVT) {
217  SDValue Chain = Node->getOperand(0);
218  SDValue Glue;
219 
220  SDValue Base;
221  SelectBaseAddr(Node->getOperand(CurOp++), Base);
222  Operands.push_back(Base); // Base pointer.
223 
224  if (IsStridedOrIndexed) {
225  Operands.push_back(Node->getOperand(CurOp++)); // Index.
226  if (IndexVT)
227  *IndexVT = Operands.back()->getSimpleValueType(0);
228  }
229 
230  if (IsMasked) {
231  // Mask needs to be copied to V0.
232  SDValue Mask = Node->getOperand(CurOp++);
233  Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
234  Glue = Chain.getValue(1);
235  Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
236  }
237  SDValue VL;
238  selectVLOp(Node->getOperand(CurOp++), VL);
239  Operands.push_back(VL);
240 
241  MVT XLenVT = Subtarget->getXLenVT();
242  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
243  Operands.push_back(SEWOp);
244 
245  Operands.push_back(Chain); // Chain.
246  if (Glue)
247  Operands.push_back(Glue);
248 }
249 
250 void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
251  bool IsStrided) {
252  SDLoc DL(Node);
253  unsigned NF = Node->getNumValues() - 1;
254  MVT VT = Node->getSimpleValueType(0);
255  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
257 
258  unsigned CurOp = 2;
260  if (IsMasked) {
261  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
262  Node->op_begin() + CurOp + NF);
263  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
264  Operands.push_back(MaskedOff);
265  CurOp += NF;
266  }
267 
268  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
269  Operands);
270 
271  const RISCV::VLSEGPseudo *P =
272  RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
273  static_cast<unsigned>(LMUL));
276 
277  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
278  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
279 
280  SDValue SuperReg = SDValue(Load, 0);
281  for (unsigned I = 0; I < NF; ++I) {
282  unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
283  ReplaceUses(SDValue(Node, I),
284  CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
285  }
286 
287  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
288  CurDAG->RemoveDeadNode(Node);
289 }
290 
291 void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
292  SDLoc DL(Node);
293  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
294  MVT VT = Node->getSimpleValueType(0);
295  MVT XLenVT = Subtarget->getXLenVT();
296  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
298 
299  unsigned CurOp = 2;
301  if (IsMasked) {
302  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
303  Node->op_begin() + CurOp + NF);
304  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
305  Operands.push_back(MaskedOff);
306  CurOp += NF;
307  }
308 
309  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
310  /*IsStridedOrIndexed*/ false, Operands);
311 
312  const RISCV::VLSEGPseudo *P =
313  RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
314  Log2SEW, static_cast<unsigned>(LMUL));
317  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
318  /*Glue*/ SDValue(Load, 2));
319 
320  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
321  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
322 
323  SDValue SuperReg = SDValue(Load, 0);
324  for (unsigned I = 0; I < NF; ++I) {
325  unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
326  ReplaceUses(SDValue(Node, I),
327  CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
328  }
329 
330  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0)); // VL
331  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
332  CurDAG->RemoveDeadNode(Node);
333 }
334 
335 void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
336  bool IsOrdered) {
337  SDLoc DL(Node);
338  unsigned NF = Node->getNumValues() - 1;
339  MVT VT = Node->getSimpleValueType(0);
340  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
342 
343  unsigned CurOp = 2;
345  if (IsMasked) {
346  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
347  Node->op_begin() + CurOp + NF);
348  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
349  Operands.push_back(MaskedOff);
350  CurOp += NF;
351  }
352 
353  MVT IndexVT;
354  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
355  /*IsStridedOrIndexed*/ true, Operands, &IndexVT);
356 
358  "Element count mismatch");
359 
360  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
361  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
362  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
363  NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
364  static_cast<unsigned>(IndexLMUL));
367 
368  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
369  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
370 
371  SDValue SuperReg = SDValue(Load, 0);
372  for (unsigned I = 0; I < NF; ++I) {
373  unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
374  ReplaceUses(SDValue(Node, I),
375  CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
376  }
377 
378  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
379  CurDAG->RemoveDeadNode(Node);
380 }
381 
382 void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
383  bool IsStrided) {
384  SDLoc DL(Node);
385  unsigned NF = Node->getNumOperands() - 4;
386  if (IsStrided)
387  NF--;
388  if (IsMasked)
389  NF--;
390  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
391  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
393  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
394  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
395 
397  Operands.push_back(StoreVal);
398  unsigned CurOp = 2 + NF;
399 
400  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
401  Operands);
402 
403  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
404  NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
406  CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
407 
408  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
409  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
410 
411  ReplaceNode(Node, Store);
412 }
413 
414 void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
415  bool IsOrdered) {
416  SDLoc DL(Node);
417  unsigned NF = Node->getNumOperands() - 5;
418  if (IsMasked)
419  --NF;
420  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
421  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
423  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
424  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
425 
427  Operands.push_back(StoreVal);
428  unsigned CurOp = 2 + NF;
429 
430  MVT IndexVT;
431  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
432  /*IsStridedOrIndexed*/ true, Operands, &IndexVT);
433 
435  "Element count mismatch");
436 
437  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
438  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
439  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
440  NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
441  static_cast<unsigned>(IndexLMUL));
443  CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
444 
445  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
446  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
447 
448  ReplaceNode(Node, Store);
449 }
450 
451 
453  // If we have a custom node, we have already selected.
454  if (Node->isMachineOpcode()) {
455  LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
456  Node->setNodeId(-1);
457  return;
458  }
459 
460  // Instruction Selection not handled by the auto-generated tablegen selection
461  // should be handled here.
462  unsigned Opcode = Node->getOpcode();
463  MVT XLenVT = Subtarget->getXLenVT();
464  SDLoc DL(Node);
465  MVT VT = Node->getSimpleValueType(0);
466 
467  switch (Opcode) {
468  case ISD::Constant: {
469  auto *ConstNode = cast<ConstantSDNode>(Node);
470  if (VT == XLenVT && ConstNode->isZero()) {
471  SDValue New =
472  CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
473  ReplaceNode(Node, New.getNode());
474  return;
475  }
476  int64_t Imm = ConstNode->getSExtValue();
477  // If the upper XLen-16 bits are not used, try to convert this to a simm12
478  // by sign extending bit 15.
479  if (isUInt<16>(Imm) && isInt<12>(SignExtend64(Imm, 16)) &&
480  hasAllHUsers(Node))
481  Imm = SignExtend64(Imm, 16);
482  // If the upper 32-bits are not used try to convert this into a simm32 by
483  // sign extending bit 32.
484  if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
485  Imm = SignExtend64(Imm, 32);
486 
487  ReplaceNode(Node, selectImm(CurDAG, DL, Imm, *Subtarget));
488  return;
489  }
490  case ISD::FrameIndex: {
491  SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
492  int FI = cast<FrameIndexSDNode>(Node)->getIndex();
493  SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
494  ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
495  return;
496  }
497  case ISD::SRL: {
498  // We don't need this transform if zext.h is supported.
499  if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
500  break;
501  // Optimize (srl (and X, 0xffff), C) ->
502  // (srli (slli X, (XLen-16), (XLen-16) + C)
503  // Taking into account that the 0xffff may have had lower bits unset by
504  // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
505  // This pattern occurs when type legalizing i16 right shifts.
506  // FIXME: This could be extended to other AND masks.
507  auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
508  if (N1C) {
509  uint64_t ShAmt = N1C->getZExtValue();
510  SDValue N0 = Node->getOperand(0);
511  if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
512  isa<ConstantSDNode>(N0.getOperand(1))) {
514  Mask |= maskTrailingOnes<uint64_t>(ShAmt);
515  if (Mask == 0xffff) {
516  unsigned LShAmt = Subtarget->getXLen() - 16;
517  SDNode *SLLI =
518  CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
519  CurDAG->getTargetConstant(LShAmt, DL, VT));
520  SDNode *SRLI = CurDAG->getMachineNode(
521  RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
522  CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
523  ReplaceNode(Node, SRLI);
524  return;
525  }
526  }
527  }
528 
529  break;
530  }
531  case ISD::AND: {
532  auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
533  if (!N1C)
534  break;
535 
536  SDValue N0 = Node->getOperand(0);
537 
538  bool LeftShift = N0.getOpcode() == ISD::SHL;
539  if (!LeftShift && N0.getOpcode() != ISD::SRL)
540  break;
541 
542  auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
543  if (!C)
544  break;
545  uint64_t C2 = C->getZExtValue();
546  unsigned XLen = Subtarget->getXLen();
547  if (!C2 || C2 >= XLen)
548  break;
549 
550  uint64_t C1 = N1C->getZExtValue();
551 
552  // Keep track of whether this is a andi, zext.h, or zext.w.
553  bool ZExtOrANDI = isInt<12>(N1C->getSExtValue());
554  if (C1 == UINT64_C(0xFFFF) &&
555  (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp()))
556  ZExtOrANDI = true;
557  if (C1 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba())
558  ZExtOrANDI = true;
559 
560  // Clear irrelevant bits in the mask.
561  if (LeftShift)
562  C1 &= maskTrailingZeros<uint64_t>(C2);
563  else
564  C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
565 
566  // Some transforms should only be done if the shift has a single use or
567  // the AND would become (srli (slli X, 32), 32)
568  bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
569 
570  SDValue X = N0.getOperand(0);
571 
572  // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
573  // with c3 leading zeros.
574  if (!LeftShift && isMask_64(C1)) {
575  uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
576  if (C2 < C3) {
577  // If the number of leading zeros is C2+32 this can be SRLIW.
578  if (C2 + 32 == C3) {
579  SDNode *SRLIW =
580  CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
581  CurDAG->getTargetConstant(C2, DL, XLenVT));
582  ReplaceNode(Node, SRLIW);
583  return;
584  }
585 
586  // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
587  // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
588  //
589  // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
590  // legalized and goes through DAG combine.
591  SDValue Y;
592  if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
593  selectSExti32(X, Y)) {
594  SDNode *SRAIW =
595  CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, Y,
596  CurDAG->getTargetConstant(31, DL, XLenVT));
597  SDNode *SRLIW = CurDAG->getMachineNode(
598  RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
599  CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
600  ReplaceNode(Node, SRLIW);
601  return;
602  }
603 
604  // (srli (slli x, c3-c2), c3).
605  if (OneUseOrZExtW && !ZExtOrANDI) {
606  SDNode *SLLI = CurDAG->getMachineNode(
607  RISCV::SLLI, DL, XLenVT, X,
608  CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
609  SDNode *SRLI =
610  CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
611  CurDAG->getTargetConstant(C3, DL, XLenVT));
612  ReplaceNode(Node, SRLI);
613  return;
614  }
615  }
616  }
617 
618  // Turn (and (shl x, c2) c1) -> (srli (slli c2+c3), c3) if c1 is a mask
619  // shifted by c2 bits with c3 leading zeros.
620  if (LeftShift && isShiftedMask_64(C1)) {
621  uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
622 
623  if (C2 + C3 < XLen &&
624  C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
625  // Use slli.uw when possible.
626  if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
627  SDNode *SLLIUW =
628  CurDAG->getMachineNode(RISCV::SLLIUW, DL, XLenVT, X,
629  CurDAG->getTargetConstant(C2, DL, XLenVT));
630  ReplaceNode(Node, SLLIUW);
631  return;
632  }
633 
634  // (srli (slli c2+c3), c3)
635  if (OneUseOrZExtW && !ZExtOrANDI) {
636  SDNode *SLLI = CurDAG->getMachineNode(
637  RISCV::SLLI, DL, XLenVT, X,
638  CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
639  SDNode *SRLI =
640  CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
641  CurDAG->getTargetConstant(C3, DL, XLenVT));
642  ReplaceNode(Node, SRLI);
643  return;
644  }
645  }
646  }
647 
648  break;
649  }
651  unsigned IntNo = Node->getConstantOperandVal(0);
652  switch (IntNo) {
653  // By default we do not custom select any intrinsic.
654  default:
655  break;
656  case Intrinsic::riscv_vmsgeu:
657  case Intrinsic::riscv_vmsge: {
658  SDValue Src1 = Node->getOperand(1);
659  SDValue Src2 = Node->getOperand(2);
660  // Only custom select scalar second operand.
661  if (Src2.getValueType() != XLenVT)
662  break;
663  // Small constants are handled with patterns.
664  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
665  int64_t CVal = C->getSExtValue();
666  if (CVal >= -15 && CVal <= 16)
667  break;
668  }
669  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
670  MVT Src1VT = Src1.getSimpleValueType();
671  unsigned VMSLTOpcode, VMNANDOpcode;
672  switch (RISCVTargetLowering::getLMUL(Src1VT)) {
673  default:
674  llvm_unreachable("Unexpected LMUL!");
676  VMSLTOpcode =
677  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
678  VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
679  break;
681  VMSLTOpcode =
682  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
683  VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
684  break;
686  VMSLTOpcode =
687  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
688  VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
689  break;
691  VMSLTOpcode =
692  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
693  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
694  break;
696  VMSLTOpcode =
697  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
698  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
699  break;
701  VMSLTOpcode =
702  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
703  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
704  break;
706  VMSLTOpcode =
707  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
708  VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
709  break;
710  }
712  Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
713  SDValue VL;
714  selectVLOp(Node->getOperand(3), VL);
715 
716  // Expand to
717  // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
718  SDValue Cmp = SDValue(
719  CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
720  0);
721  ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
722  {Cmp, Cmp, VL, SEW}));
723  return;
724  }
725  case Intrinsic::riscv_vmsgeu_mask:
726  case Intrinsic::riscv_vmsge_mask: {
727  SDValue Src1 = Node->getOperand(2);
728  SDValue Src2 = Node->getOperand(3);
729  // Only custom select scalar second operand.
730  if (Src2.getValueType() != XLenVT)
731  break;
732  // Small constants are handled with patterns.
733  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
734  int64_t CVal = C->getSExtValue();
735  if (CVal >= -15 && CVal <= 16)
736  break;
737  }
738  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
739  MVT Src1VT = Src1.getSimpleValueType();
740  unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
741  switch (RISCVTargetLowering::getLMUL(Src1VT)) {
742  default:
743  llvm_unreachable("Unexpected LMUL!");
745  VMSLTOpcode =
746  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
747  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
748  : RISCV::PseudoVMSLT_VX_MF8_MASK;
749  break;
751  VMSLTOpcode =
752  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
753  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
754  : RISCV::PseudoVMSLT_VX_MF4_MASK;
755  break;
757  VMSLTOpcode =
758  IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
759  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
760  : RISCV::PseudoVMSLT_VX_MF2_MASK;
761  break;
763  VMSLTOpcode =
764  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
765  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
766  : RISCV::PseudoVMSLT_VX_M1_MASK;
767  break;
769  VMSLTOpcode =
770  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
771  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
772  : RISCV::PseudoVMSLT_VX_M2_MASK;
773  break;
775  VMSLTOpcode =
776  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
777  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
778  : RISCV::PseudoVMSLT_VX_M4_MASK;
779  break;
781  VMSLTOpcode =
782  IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
783  VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
784  : RISCV::PseudoVMSLT_VX_M8_MASK;
785  break;
786  }
787  // Mask operations use the LMUL from the mask type.
788  switch (RISCVTargetLowering::getLMUL(VT)) {
789  default:
790  llvm_unreachable("Unexpected LMUL!");
792  VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
793  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
794  break;
796  VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
797  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
798  break;
800  VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
801  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
802  break;
804  VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
805  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
806  break;
808  VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
809  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
810  break;
812  VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
813  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
814  break;
816  VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
817  VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
818  break;
819  }
821  Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
822  SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
823  SDValue VL;
824  selectVLOp(Node->getOperand(5), VL);
825  SDValue MaskedOff = Node->getOperand(1);
826  SDValue Mask = Node->getOperand(4);
827  // If the MaskedOff value and the Mask are the same value use
828  // vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt
829  // This avoids needing to copy v0 to vd before starting the next sequence.
830  if (Mask == MaskedOff) {
831  SDValue Cmp = SDValue(
832  CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
833  0);
834  ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
835  {Mask, Cmp, VL, MaskSEW}));
836  return;
837  }
838 
839  // Mask needs to be copied to V0.
841  RISCV::V0, Mask, SDValue());
842  SDValue Glue = Chain.getValue(1);
843  SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
844 
845  // Otherwise use
846  // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
847  SDValue Cmp = SDValue(
848  CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
849  {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
850  0);
851  ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
852  {Cmp, Mask, VL, MaskSEW}));
853  return;
854  }
855  }
856  break;
857  }
858  case ISD::INTRINSIC_W_CHAIN: {
859  unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
860  switch (IntNo) {
861  // By default we do not custom select any intrinsic.
862  default:
863  break;
864 
865  case Intrinsic::riscv_vsetvli:
866  case Intrinsic::riscv_vsetvlimax: {
867  if (!Subtarget->hasStdExtV())
868  break;
869 
870  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
871  unsigned Offset = VLMax ? 2 : 3;
872 
873  assert(Node->getNumOperands() == Offset + 2 &&
874  "Unexpected number of operands");
875 
876  unsigned SEW =
877  RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
878  RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
879  Node->getConstantOperandVal(Offset + 1) & 0x7);
880 
881  unsigned VTypeI = RISCVVType::encodeVTYPE(
882  VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
883  SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
884 
885  SDValue VLOperand;
886  unsigned Opcode = RISCV::PseudoVSETVLI;
887  if (VLMax) {
888  VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
889  Opcode = RISCV::PseudoVSETVLIX0;
890  } else {
891  VLOperand = Node->getOperand(2);
892 
893  if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
894  uint64_t AVL = C->getZExtValue();
895  if (isUInt<5>(AVL)) {
896  SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
897  ReplaceNode(
898  Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
899  MVT::Other, VLImm, VTypeIOp,
900  /* Chain */ Node->getOperand(0)));
901  return;
902  }
903  }
904  }
905 
906  ReplaceNode(Node,
907  CurDAG->getMachineNode(Opcode, DL, XLenVT,
908  MVT::Other, VLOperand, VTypeIOp,
909  /* Chain */ Node->getOperand(0)));
910  return;
911  }
912  case Intrinsic::riscv_vlseg2:
913  case Intrinsic::riscv_vlseg3:
914  case Intrinsic::riscv_vlseg4:
915  case Intrinsic::riscv_vlseg5:
916  case Intrinsic::riscv_vlseg6:
917  case Intrinsic::riscv_vlseg7:
918  case Intrinsic::riscv_vlseg8: {
919  selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
920  return;
921  }
922  case Intrinsic::riscv_vlseg2_mask:
923  case Intrinsic::riscv_vlseg3_mask:
924  case Intrinsic::riscv_vlseg4_mask:
925  case Intrinsic::riscv_vlseg5_mask:
926  case Intrinsic::riscv_vlseg6_mask:
927  case Intrinsic::riscv_vlseg7_mask:
928  case Intrinsic::riscv_vlseg8_mask: {
929  selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
930  return;
931  }
932  case Intrinsic::riscv_vlsseg2:
933  case Intrinsic::riscv_vlsseg3:
934  case Intrinsic::riscv_vlsseg4:
935  case Intrinsic::riscv_vlsseg5:
936  case Intrinsic::riscv_vlsseg6:
937  case Intrinsic::riscv_vlsseg7:
938  case Intrinsic::riscv_vlsseg8: {
939  selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
940  return;
941  }
942  case Intrinsic::riscv_vlsseg2_mask:
943  case Intrinsic::riscv_vlsseg3_mask:
944  case Intrinsic::riscv_vlsseg4_mask:
945  case Intrinsic::riscv_vlsseg5_mask:
946  case Intrinsic::riscv_vlsseg6_mask:
947  case Intrinsic::riscv_vlsseg7_mask:
948  case Intrinsic::riscv_vlsseg8_mask: {
949  selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
950  return;
951  }
952  case Intrinsic::riscv_vloxseg2:
953  case Intrinsic::riscv_vloxseg3:
954  case Intrinsic::riscv_vloxseg4:
955  case Intrinsic::riscv_vloxseg5:
956  case Intrinsic::riscv_vloxseg6:
957  case Intrinsic::riscv_vloxseg7:
958  case Intrinsic::riscv_vloxseg8:
959  selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
960  return;
961  case Intrinsic::riscv_vluxseg2:
962  case Intrinsic::riscv_vluxseg3:
963  case Intrinsic::riscv_vluxseg4:
964  case Intrinsic::riscv_vluxseg5:
965  case Intrinsic::riscv_vluxseg6:
966  case Intrinsic::riscv_vluxseg7:
967  case Intrinsic::riscv_vluxseg8:
968  selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
969  return;
970  case Intrinsic::riscv_vloxseg2_mask:
971  case Intrinsic::riscv_vloxseg3_mask:
972  case Intrinsic::riscv_vloxseg4_mask:
973  case Intrinsic::riscv_vloxseg5_mask:
974  case Intrinsic::riscv_vloxseg6_mask:
975  case Intrinsic::riscv_vloxseg7_mask:
976  case Intrinsic::riscv_vloxseg8_mask:
977  selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
978  return;
979  case Intrinsic::riscv_vluxseg2_mask:
980  case Intrinsic::riscv_vluxseg3_mask:
981  case Intrinsic::riscv_vluxseg4_mask:
982  case Intrinsic::riscv_vluxseg5_mask:
983  case Intrinsic::riscv_vluxseg6_mask:
984  case Intrinsic::riscv_vluxseg7_mask:
985  case Intrinsic::riscv_vluxseg8_mask:
986  selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
987  return;
988  case Intrinsic::riscv_vlseg8ff:
989  case Intrinsic::riscv_vlseg7ff:
990  case Intrinsic::riscv_vlseg6ff:
991  case Intrinsic::riscv_vlseg5ff:
992  case Intrinsic::riscv_vlseg4ff:
993  case Intrinsic::riscv_vlseg3ff:
994  case Intrinsic::riscv_vlseg2ff: {
995  selectVLSEGFF(Node, /*IsMasked*/ false);
996  return;
997  }
998  case Intrinsic::riscv_vlseg8ff_mask:
999  case Intrinsic::riscv_vlseg7ff_mask:
1000  case Intrinsic::riscv_vlseg6ff_mask:
1001  case Intrinsic::riscv_vlseg5ff_mask:
1002  case Intrinsic::riscv_vlseg4ff_mask:
1003  case Intrinsic::riscv_vlseg3ff_mask:
1004  case Intrinsic::riscv_vlseg2ff_mask: {
1005  selectVLSEGFF(Node, /*IsMasked*/ true);
1006  return;
1007  }
1008  case Intrinsic::riscv_vloxei:
1009  case Intrinsic::riscv_vloxei_mask:
1010  case Intrinsic::riscv_vluxei:
1011  case Intrinsic::riscv_vluxei_mask: {
1012  bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1013  IntNo == Intrinsic::riscv_vluxei_mask;
1014  bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1015  IntNo == Intrinsic::riscv_vloxei_mask;
1016 
1017  MVT VT = Node->getSimpleValueType(0);
1018  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1019 
1020  unsigned CurOp = 2;
1022  if (IsMasked)
1023  Operands.push_back(Node->getOperand(CurOp++));
1024 
1025  MVT IndexVT;
1026  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1027  /*IsStridedOrIndexed*/ true, Operands,
1028  &IndexVT);
1029 
1031  "Element count mismatch");
1032 
1034  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1035  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1036  const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
1037  IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1038  static_cast<unsigned>(IndexLMUL));
1039  MachineSDNode *Load =
1040  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1041 
1042  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1043  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1044 
1045  ReplaceNode(Node, Load);
1046  return;
1047  }
1048  case Intrinsic::riscv_vle1:
1049  case Intrinsic::riscv_vle:
1050  case Intrinsic::riscv_vle_mask:
1051  case Intrinsic::riscv_vlse:
1052  case Intrinsic::riscv_vlse_mask: {
1053  bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1054  IntNo == Intrinsic::riscv_vlse_mask;
1055  bool IsStrided =
1056  IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1057 
1058  MVT VT = Node->getSimpleValueType(0);
1059  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1060 
1061  unsigned CurOp = 2;
1063  if (IsMasked)
1064  Operands.push_back(Node->getOperand(CurOp++));
1065 
1066  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1067  Operands);
1068 
1070  const RISCV::VLEPseudo *P =
1071  RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
1072  static_cast<unsigned>(LMUL));
1073  MachineSDNode *Load =
1074  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1075 
1076  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1077  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1078 
1079  ReplaceNode(Node, Load);
1080  return;
1081  }
1082  case Intrinsic::riscv_vleff:
1083  case Intrinsic::riscv_vleff_mask: {
1084  bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1085 
1086  MVT VT = Node->getSimpleValueType(0);
1087  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1088 
1089  unsigned CurOp = 2;
1091  if (IsMasked)
1092  Operands.push_back(Node->getOperand(CurOp++));
1093 
1094  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1095  /*IsStridedOrIndexed*/ false, Operands);
1096 
1098  const RISCV::VLEPseudo *P =
1099  RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
1100  static_cast<unsigned>(LMUL));
1101  MachineSDNode *Load =
1102  CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
1104  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
1105  /*Glue*/ SDValue(Load, 2));
1106 
1107  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1108  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1109 
1110  ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
1111  ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
1112  ReplaceUses(SDValue(Node, 2), SDValue(Load, 1)); // Chain
1113  CurDAG->RemoveDeadNode(Node);
1114  return;
1115  }
1116  }
1117  break;
1118  }
1119  case ISD::INTRINSIC_VOID: {
1120  unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1121  switch (IntNo) {
1122  case Intrinsic::riscv_vsseg2:
1123  case Intrinsic::riscv_vsseg3:
1124  case Intrinsic::riscv_vsseg4:
1125  case Intrinsic::riscv_vsseg5:
1126  case Intrinsic::riscv_vsseg6:
1127  case Intrinsic::riscv_vsseg7:
1128  case Intrinsic::riscv_vsseg8: {
1129  selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1130  return;
1131  }
1132  case Intrinsic::riscv_vsseg2_mask:
1133  case Intrinsic::riscv_vsseg3_mask:
1134  case Intrinsic::riscv_vsseg4_mask:
1135  case Intrinsic::riscv_vsseg5_mask:
1136  case Intrinsic::riscv_vsseg6_mask:
1137  case Intrinsic::riscv_vsseg7_mask:
1138  case Intrinsic::riscv_vsseg8_mask: {
1139  selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1140  return;
1141  }
1142  case Intrinsic::riscv_vssseg2:
1143  case Intrinsic::riscv_vssseg3:
1144  case Intrinsic::riscv_vssseg4:
1145  case Intrinsic::riscv_vssseg5:
1146  case Intrinsic::riscv_vssseg6:
1147  case Intrinsic::riscv_vssseg7:
1148  case Intrinsic::riscv_vssseg8: {
1149  selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1150  return;
1151  }
1152  case Intrinsic::riscv_vssseg2_mask:
1153  case Intrinsic::riscv_vssseg3_mask:
1154  case Intrinsic::riscv_vssseg4_mask:
1155  case Intrinsic::riscv_vssseg5_mask:
1156  case Intrinsic::riscv_vssseg6_mask:
1157  case Intrinsic::riscv_vssseg7_mask:
1158  case Intrinsic::riscv_vssseg8_mask: {
1159  selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1160  return;
1161  }
1162  case Intrinsic::riscv_vsoxseg2:
1163  case Intrinsic::riscv_vsoxseg3:
1164  case Intrinsic::riscv_vsoxseg4:
1165  case Intrinsic::riscv_vsoxseg5:
1166  case Intrinsic::riscv_vsoxseg6:
1167  case Intrinsic::riscv_vsoxseg7:
1168  case Intrinsic::riscv_vsoxseg8:
1169  selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1170  return;
1171  case Intrinsic::riscv_vsuxseg2:
1172  case Intrinsic::riscv_vsuxseg3:
1173  case Intrinsic::riscv_vsuxseg4:
1174  case Intrinsic::riscv_vsuxseg5:
1175  case Intrinsic::riscv_vsuxseg6:
1176  case Intrinsic::riscv_vsuxseg7:
1177  case Intrinsic::riscv_vsuxseg8:
1178  selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1179  return;
1180  case Intrinsic::riscv_vsoxseg2_mask:
1181  case Intrinsic::riscv_vsoxseg3_mask:
1182  case Intrinsic::riscv_vsoxseg4_mask:
1183  case Intrinsic::riscv_vsoxseg5_mask:
1184  case Intrinsic::riscv_vsoxseg6_mask:
1185  case Intrinsic::riscv_vsoxseg7_mask:
1186  case Intrinsic::riscv_vsoxseg8_mask:
1187  selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1188  return;
1189  case Intrinsic::riscv_vsuxseg2_mask:
1190  case Intrinsic::riscv_vsuxseg3_mask:
1191  case Intrinsic::riscv_vsuxseg4_mask:
1192  case Intrinsic::riscv_vsuxseg5_mask:
1193  case Intrinsic::riscv_vsuxseg6_mask:
1194  case Intrinsic::riscv_vsuxseg7_mask:
1195  case Intrinsic::riscv_vsuxseg8_mask:
1196  selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1197  return;
1198  case Intrinsic::riscv_vsoxei:
1199  case Intrinsic::riscv_vsoxei_mask:
1200  case Intrinsic::riscv_vsuxei:
1201  case Intrinsic::riscv_vsuxei_mask: {
1202  bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1203  IntNo == Intrinsic::riscv_vsuxei_mask;
1204  bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1205  IntNo == Intrinsic::riscv_vsoxei_mask;
1206 
1207  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1208  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1209 
1210  unsigned CurOp = 2;
1212  Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1213 
1214  MVT IndexVT;
1215  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1216  /*IsStridedOrIndexed*/ true, Operands,
1217  &IndexVT);
1218 
1220  "Element count mismatch");
1221 
1223  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1224  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1225  const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
1226  IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1227  static_cast<unsigned>(IndexLMUL));
1228  MachineSDNode *Store =
1229  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1230 
1231  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1232  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1233 
1234  ReplaceNode(Node, Store);
1235  return;
1236  }
1237  case Intrinsic::riscv_vse1:
1238  case Intrinsic::riscv_vse:
1239  case Intrinsic::riscv_vse_mask:
1240  case Intrinsic::riscv_vsse:
1241  case Intrinsic::riscv_vsse_mask: {
1242  bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1243  IntNo == Intrinsic::riscv_vsse_mask;
1244  bool IsStrided =
1245  IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1246 
1247  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1248  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1249 
1250  unsigned CurOp = 2;
1252  Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1253 
1254  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1255  Operands);
1256 
1258  const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1259  IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1260  MachineSDNode *Store =
1261  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1262  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1263  CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1264 
1265  ReplaceNode(Node, Store);
1266  return;
1267  }
1268  }
1269  break;
1270  }
1271  case ISD::BITCAST: {
1272  MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1273  // Just drop bitcasts between vectors if both are fixed or both are
1274  // scalable.
1275  if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1276  (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1277  ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1278  CurDAG->RemoveDeadNode(Node);
1279  return;
1280  }
1281  break;
1282  }
1283  case ISD::INSERT_SUBVECTOR: {
1284  SDValue V = Node->getOperand(0);
1285  SDValue SubV = Node->getOperand(1);
1286  SDLoc DL(SubV);
1287  auto Idx = Node->getConstantOperandVal(2);
1288  MVT SubVecVT = SubV.getSimpleValueType();
1289 
1290  const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1291  MVT SubVecContainerVT = SubVecVT;
1292  // Establish the correct scalable-vector types for any fixed-length type.
1293  if (SubVecVT.isFixedLengthVector())
1294  SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1295  if (VT.isFixedLengthVector())
1296  VT = TLI.getContainerForFixedLengthVector(VT);
1297 
1298  const auto *TRI = Subtarget->getRegisterInfo();
1299  unsigned SubRegIdx;
1300  std::tie(SubRegIdx, Idx) =
1302  VT, SubVecContainerVT, Idx, TRI);
1303 
1304  // If the Idx hasn't been completely eliminated then this is a subvector
1305  // insert which doesn't naturally align to a vector register. These must
1306  // be handled using instructions to manipulate the vector registers.
1307  if (Idx != 0)
1308  break;
1309 
1310  RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1311  bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1312  SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1313  SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1314  (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1315  assert((!IsSubVecPartReg || V.isUndef()) &&
1316  "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1317  "the subvector is smaller than a full-sized register");
1318 
1319  // If we haven't set a SubRegIdx, then we must be going between
1320  // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1321  if (SubRegIdx == RISCV::NoSubRegister) {
1322  unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1324  InRegClassID &&
1325  "Unexpected subvector extraction");
1326  SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1327  SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1328  DL, VT, SubV, RC);
1329  ReplaceNode(Node, NewNode);
1330  return;
1331  }
1332 
1333  SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1334  ReplaceNode(Node, Insert.getNode());
1335  return;
1336  }
1337  case ISD::EXTRACT_SUBVECTOR: {
1338  SDValue V = Node->getOperand(0);
1339  auto Idx = Node->getConstantOperandVal(1);
1340  MVT InVT = V.getSimpleValueType();
1341  SDLoc DL(V);
1342 
1343  const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1344  MVT SubVecContainerVT = VT;
1345  // Establish the correct scalable-vector types for any fixed-length type.
1346  if (VT.isFixedLengthVector())
1347  SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1348  if (InVT.isFixedLengthVector())
1349  InVT = TLI.getContainerForFixedLengthVector(InVT);
1350 
1351  const auto *TRI = Subtarget->getRegisterInfo();
1352  unsigned SubRegIdx;
1353  std::tie(SubRegIdx, Idx) =
1355  InVT, SubVecContainerVT, Idx, TRI);
1356 
1357  // If the Idx hasn't been completely eliminated then this is a subvector
1358  // extract which doesn't naturally align to a vector register. These must
1359  // be handled using instructions to manipulate the vector registers.
1360  if (Idx != 0)
1361  break;
1362 
1363  // If we haven't set a SubRegIdx, then we must be going between
1364  // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1365  if (SubRegIdx == RISCV::NoSubRegister) {
1366  unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1368  InRegClassID &&
1369  "Unexpected subvector extraction");
1370  SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1371  SDNode *NewNode =
1372  CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1373  ReplaceNode(Node, NewNode);
1374  return;
1375  }
1376 
1377  SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1378  ReplaceNode(Node, Extract.getNode());
1379  return;
1380  }
1381  case RISCVISD::VMV_V_X_VL:
1382  case RISCVISD::VFMV_V_F_VL: {
1383  // Try to match splat of a scalar load to a strided load with stride of x0.
1384  SDValue Src = Node->getOperand(0);
1385  auto *Ld = dyn_cast<LoadSDNode>(Src);
1386  if (!Ld)
1387  break;
1388  EVT MemVT = Ld->getMemoryVT();
1389  // The memory VT should be the same size as the element type.
1390  if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1391  break;
1392  if (!IsProfitableToFold(Src, Node, Node) ||
1393  !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1394  break;
1395 
1396  SDValue VL;
1397  selectVLOp(Node->getOperand(1), VL);
1398 
1399  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1400  SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1401 
1402  SDValue Operands[] = {Ld->getBasePtr(),
1403  CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1404  Ld->getChain()};
1405 
1407  const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1408  /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW,
1409  static_cast<unsigned>(LMUL));
1410  MachineSDNode *Load =
1411  CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1412 
1413  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1414  CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1415 
1416  ReplaceNode(Node, Load);
1417  return;
1418  }
1419  }
1420 
1421  // Select the default instruction.
1422  SelectCode(Node);
1423 }
1424 
1426  const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1427  switch (ConstraintID) {
1429  // We just support simple memory operands that have a single address
1430  // operand and need no special handling.
1431  OutOps.push_back(Op);
1432  return false;
1434  OutOps.push_back(Op);
1435  return false;
1436  default:
1437  break;
1438  }
1439 
1440  return true;
1441 }
1442 
1444  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1445  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1446  return true;
1447  }
1448  return false;
1449 }
1450 
1452  // If this is FrameIndex, select it directly. Otherwise just let it get
1453  // selected to a register independently.
1454  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1455  Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1456  else
1457  Base = Addr;
1458  return true;
1459 }
1460 
1462  SDValue &ShAmt) {
1463  // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1464  // amount. If there is an AND on the shift amount, we can bypass it if it
1465  // doesn't affect any of those bits.
1466  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1467  const APInt &AndMask = N->getConstantOperandAPInt(1);
1468 
1469  // Since the max shift amount is a power of 2 we can subtract 1 to make a
1470  // mask that covers the bits needed to represent all shift amounts.
1471  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1472  APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1473 
1474  if (ShMask.isSubsetOf(AndMask)) {
1475  ShAmt = N.getOperand(0);
1476  return true;
1477  }
1478 
1479  // SimplifyDemandedBits may have optimized the mask so try restoring any
1480  // bits that are known zero.
1481  KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1482  if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1483  ShAmt = N.getOperand(0);
1484  return true;
1485  }
1486  }
1487 
1488  ShAmt = N;
1489  return true;
1490 }
1491 
1493  if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1494  cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1495  Val = N.getOperand(0);
1496  return true;
1497  }
1498  MVT VT = N.getSimpleValueType();
1499  if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1500  Val = N;
1501  return true;
1502  }
1503 
1504  return false;
1505 }
1506 
1508  if (N.getOpcode() == ISD::AND) {
1509  auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1510  if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1511  Val = N.getOperand(0);
1512  return true;
1513  }
1514  }
1515  MVT VT = N.getSimpleValueType();
1517  if (CurDAG->MaskedValueIsZero(N, Mask)) {
1518  Val = N;
1519  return true;
1520  }
1521 
1522  return false;
1523 }
1524 
1525 // Return true if all users of this SDNode* only consume the lower \p Bits.
1526 // This can be used to form W instructions for add/sub/mul/shl even when the
1527 // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
1528 // SimplifyDemandedBits has made it so some users see a sext_inreg and some
1529 // don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
1530 // the add/sub/mul/shl to become non-W instructions. By checking the users we
1531 // may be able to use a W instruction and CSE with the other instruction if
1532 // this has happened. We could try to detect that the CSE opportunity exists
1533 // before doing this, but that would be more complicated.
1534 // TODO: Does this need to look through AND/OR/XOR to their users to find more
1535 // opportunities.
1536 bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
1537  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
1538  Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
1539  Node->getOpcode() == ISD::SRL ||
1540  Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
1541  isa<ConstantSDNode>(Node)) &&
1542  "Unexpected opcode");
1543 
1544  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
1545  SDNode *User = *UI;
1546  // Users of this node should have already been instruction selected
1547  if (!User->isMachineOpcode())
1548  return false;
1549 
1550  // TODO: Add more opcodes?
1551  switch (User->getMachineOpcode()) {
1552  default:
1553  return false;
1554  case RISCV::ADDW:
1555  case RISCV::ADDIW:
1556  case RISCV::SUBW:
1557  case RISCV::MULW:
1558  case RISCV::SLLW:
1559  case RISCV::SLLIW:
1560  case RISCV::SRAW:
1561  case RISCV::SRAIW:
1562  case RISCV::SRLW:
1563  case RISCV::SRLIW:
1564  case RISCV::DIVW:
1565  case RISCV::DIVUW:
1566  case RISCV::REMW:
1567  case RISCV::REMUW:
1568  case RISCV::ROLW:
1569  case RISCV::RORW:
1570  case RISCV::RORIW:
1571  case RISCV::CLZW:
1572  case RISCV::CTZW:
1573  case RISCV::CPOPW:
1574  case RISCV::SLLIUW:
1575  if (Bits < 32)
1576  return false;
1577  break;
1578  case RISCV::SLLI:
1579  // SLLI only uses the lower (XLen - ShAmt) bits.
1580  if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
1581  return false;
1582  break;
1583  case RISCV::ADDUW:
1584  case RISCV::SH1ADDUW:
1585  case RISCV::SH2ADDUW:
1586  case RISCV::SH3ADDUW:
1587  // The first operand to add.uw/shXadd.uw is implicitly zero extended from
1588  // 32 bits.
1589  if (UI.getOperandNo() != 0 || Bits < 32)
1590  return false;
1591  break;
1592  case RISCV::SB:
1593  if (UI.getOperandNo() != 0 || Bits < 8)
1594  return false;
1595  break;
1596  case RISCV::SH:
1597  if (UI.getOperandNo() != 0 || Bits < 16)
1598  return false;
1599  break;
1600  case RISCV::SW:
1601  if (UI.getOperandNo() != 0 || Bits < 32)
1602  return false;
1603  break;
1604  }
1605  }
1606 
1607  return true;
1608 }
1609 
1610 // Select VL as a 5 bit immediate or a value that will become a register. This
1611 // allows us to choose betwen VSETIVLI or VSETVLI later.
1613  auto *C = dyn_cast<ConstantSDNode>(N);
1614  if (C && isUInt<5>(C->getZExtValue()))
1615  VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1616  N->getValueType(0));
1617  else
1618  VL = N;
1619 
1620  return true;
1621 }
1622 
1624  if (N.getOpcode() != ISD::SPLAT_VECTOR &&
1625  N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1626  N.getOpcode() != RISCVISD::VMV_V_X_VL)
1627  return false;
1628  SplatVal = N.getOperand(0);
1629  return true;
1630 }
1631 
1632 using ValidateFn = bool (*)(int64_t);
1633 
1634 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1635  SelectionDAG &DAG,
1636  const RISCVSubtarget &Subtarget,
1637  ValidateFn ValidateImm) {
1638  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1639  N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1640  N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1641  !isa<ConstantSDNode>(N.getOperand(0)))
1642  return false;
1643 
1644  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1645 
1646  // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
1647  // share semantics when the operand type is wider than the resulting vector
1648  // element type: an implicit truncation first takes place. Therefore, perform
1649  // a manual truncation/sign-extension in order to ignore any truncated bits
1650  // and catch any zero-extended immediate.
1651  // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1652  // sign-extending to (XLenVT -1).
1653  MVT XLenVT = Subtarget.getXLenVT();
1654  assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
1655  "Unexpected splat operand type");
1656  MVT EltVT = N.getSimpleValueType().getVectorElementType();
1657  if (EltVT.bitsLT(XLenVT))
1658  SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1659 
1660  if (!ValidateImm(SplatImm))
1661  return false;
1662 
1663  SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1664  return true;
1665 }
1666 
1668  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1669  [](int64_t Imm) { return isInt<5>(Imm); });
1670 }
1671 
1673  return selectVSplatSimmHelper(
1674  N, SplatVal, *CurDAG, *Subtarget,
1675  [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1676 }
1677 
1679  SDValue &SplatVal) {
1680  return selectVSplatSimmHelper(
1681  N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1682  return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1683  });
1684 }
1685 
1687  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1688  N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1689  N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1690  !isa<ConstantSDNode>(N.getOperand(0)))
1691  return false;
1692 
1693  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1694 
1695  if (!isUInt<5>(SplatImm))
1696  return false;
1697 
1698  SplatVal =
1699  CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
1700 
1701  return true;
1702 }
1703 
1705  SDValue &Imm) {
1706  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
1707  int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
1708 
1709  if (!isInt<5>(ImmVal))
1710  return false;
1711 
1712  Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
1713  return true;
1714  }
1715 
1716  return false;
1717 }
1718 
1719 // Merge an ADDI into the offset of a load/store instruction where possible.
1720 // (load (addi base, off1), off2) -> (load base, off1+off2)
1721 // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
1722 // This is possible when off1+off2 fits a 12-bit immediate.
1723 bool RISCVDAGToDAGISel::doPeepholeLoadStoreADDI(SDNode *N) {
1724  int OffsetOpIdx;
1725  int BaseOpIdx;
1726 
1727  // Only attempt this optimisation for I-type loads and S-type stores.
1728  switch (N->getMachineOpcode()) {
1729  default:
1730  return false;
1731  case RISCV::LB:
1732  case RISCV::LH:
1733  case RISCV::LW:
1734  case RISCV::LBU:
1735  case RISCV::LHU:
1736  case RISCV::LWU:
1737  case RISCV::LD:
1738  case RISCV::FLH:
1739  case RISCV::FLW:
1740  case RISCV::FLD:
1741  BaseOpIdx = 0;
1742  OffsetOpIdx = 1;
1743  break;
1744  case RISCV::SB:
1745  case RISCV::SH:
1746  case RISCV::SW:
1747  case RISCV::SD:
1748  case RISCV::FSH:
1749  case RISCV::FSW:
1750  case RISCV::FSD:
1751  BaseOpIdx = 1;
1752  OffsetOpIdx = 2;
1753  break;
1754  }
1755 
1756  if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
1757  return false;
1758 
1759  SDValue Base = N->getOperand(BaseOpIdx);
1760 
1761  // If the base is an ADDI, we can merge it in to the load/store.
1762  if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
1763  return false;
1764 
1765  SDValue ImmOperand = Base.getOperand(1);
1766  uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);
1767 
1768  if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
1769  int64_t Offset1 = Const->getSExtValue();
1770  int64_t CombinedOffset = Offset1 + Offset2;
1771  if (!isInt<12>(CombinedOffset))
1772  return false;
1773  ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
1774  ImmOperand.getValueType());
1775  } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
1776  // If the off1 in (addi base, off1) is a global variable's address (its
1777  // low part, really), then we can rely on the alignment of that variable
1778  // to provide a margin of safety before off1 can overflow the 12 bits.
1779  // Check if off2 falls within that margin; if so off1+off2 can't overflow.
1780  const DataLayout &DL = CurDAG->getDataLayout();
1781  Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
1782  if (Offset2 != 0 && Alignment <= Offset2)
1783  return false;
1784  int64_t Offset1 = GA->getOffset();
1785  int64_t CombinedOffset = Offset1 + Offset2;
1786  ImmOperand = CurDAG->getTargetGlobalAddress(
1787  GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
1788  CombinedOffset, GA->getTargetFlags());
1789  } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
1790  // Ditto.
1791  Align Alignment = CP->getAlign();
1792  if (Offset2 != 0 && Alignment <= Offset2)
1793  return false;
1794  int64_t Offset1 = CP->getOffset();
1795  int64_t CombinedOffset = Offset1 + Offset2;
1796  ImmOperand = CurDAG->getTargetConstantPool(
1797  CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
1798  CombinedOffset, CP->getTargetFlags());
1799  } else {
1800  return false;
1801  }
1802 
1803  LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
1804  LLVM_DEBUG(Base->dump(CurDAG));
1805  LLVM_DEBUG(dbgs() << "\nN: ");
1806  LLVM_DEBUG(N->dump(CurDAG));
1807  LLVM_DEBUG(dbgs() << "\n");
1808 
1809  // Modify the offset operand of the load/store.
1810  if (BaseOpIdx == 0) // Load
1811  CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
1812  N->getOperand(2));
1813  else // Store
1814  CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
1815  ImmOperand, N->getOperand(3));
1816 
1817  return true;
1818 }
1819 
1820 // Try to remove sext.w if the input is a W instruction or can be made into
1821 // a W instruction cheaply.
1822 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
1823  // Look for the sext.w pattern, addiw rd, rs1, 0.
1824  if (N->getMachineOpcode() != RISCV::ADDIW ||
1825  !isNullConstant(N->getOperand(1)))
1826  return false;
1827 
1828  SDValue N0 = N->getOperand(0);
1829  if (!N0.isMachineOpcode())
1830  return false;
1831 
1832  switch (N0.getMachineOpcode()) {
1833  default:
1834  break;
1835  case RISCV::ADD:
1836  case RISCV::ADDI:
1837  case RISCV::SUB:
1838  case RISCV::MUL:
1839  case RISCV::SLLI: {
1840  // Convert sext.w+add/sub/mul to their W instructions. This will create
1841  // a new independent instruction. This improves latency.
1842  unsigned Opc;
1843  switch (N0.getMachineOpcode()) {
1844  default:
1845  llvm_unreachable("Unexpected opcode!");
1846  case RISCV::ADD: Opc = RISCV::ADDW; break;
1847  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
1848  case RISCV::SUB: Opc = RISCV::SUBW; break;
1849  case RISCV::MUL: Opc = RISCV::MULW; break;
1850  case RISCV::SLLI: Opc = RISCV::SLLIW; break;
1851  }
1852 
1853  SDValue N00 = N0.getOperand(0);
1854  SDValue N01 = N0.getOperand(1);
1855 
1856  // Shift amount needs to be uimm5.
1857  if (N0.getMachineOpcode() == RISCV::SLLI &&
1858  !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
1859  break;
1860 
1861  SDNode *Result =
1862  CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
1863  N00, N01);
1864  ReplaceUses(N, Result);
1865  return true;
1866  }
1867  case RISCV::ADDW:
1868  case RISCV::ADDIW:
1869  case RISCV::SUBW:
1870  case RISCV::MULW:
1871  case RISCV::SLLIW:
1872  // Result is already sign extended just remove the sext.w.
1873  // NOTE: We only handle the nodes that are selected with hasAllWUsers.
1874  ReplaceUses(N, N0.getNode());
1875  return true;
1876  }
1877 
1878  return false;
1879 }
1880 
1881 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
1882 // for instruction scheduling.
1884  return new RISCVDAGToDAGISel(TM);
1885 }
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
llvm::RISCVII::LMUL_1
@ LMUL_1
Definition: RISCVBaseInfo.h:96
llvm::TargetMachine::getOptLevel
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Definition: TargetMachine.cpp:185
llvm::RISCVMatInt::Inst
Definition: RISCVMatInt.h:21
llvm::MVT::getVectorElementType
MVT getVectorElementType() const
Definition: MachineValueType.h:519
llvm::MVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: MachineValueType.h:1072
llvm::ISD::INTRINSIC_VOID
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:199
llvm::RISCVDAGToDAGISel::selectVLXSEG
void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
Definition: RISCVISelDAGToDAG.cpp:335
MathExtras.h
llvm::SelectionDAGISel::TLI
const TargetLowering * TLI
Definition: SelectionDAGISel.h:53
llvm
--------------------- PointerInfo ------------------------------------
Definition: AllocatorList.h:23
llvm::RISCVDAGToDAGISel::addVectorLoadStoreOperands
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, MVT *IndexVT=nullptr)
Definition: RISCVISelDAGToDAG.cpp:213
llvm::RISCVISD::SLLW
@ SLLW
Definition: RISCVISelLowering.h:48
llvm::SelectionDAGISel::TM
TargetMachine & TM
Definition: SelectionDAGISel.h:41
llvm::RISCV::VLSEGPseudo
Definition: RISCVISelDAGToDAG.h:121
llvm::SDLoc
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Definition: SelectionDAGNodes.h:1086
llvm::MVT::isFixedLengthVector
bool isFixedLengthVector() const
Definition: MachineValueType.h:378
llvm::RISCVDAGToDAGISel::selectVSplatSimm5Plus1
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1672
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
llvm::RISCVDAGToDAGISel::PreprocessISelDAG
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
Definition: RISCVISelDAGToDAG.cpp:44
llvm::ISD::BITCAST
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:848
Insert
Vector Rotate Left Mask Mask Insert
Definition: README_P9.txt:112
llvm::RISCVSubtarget::getTargetLowering
const RISCVTargetLowering * getTargetLowering() const override
Definition: RISCVSubtarget.h:98
llvm::SelectionDAG::getCopyToReg
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:735
llvm::RISCV::VLXSEGPseudo
Definition: RISCVISelDAGToDAG.h:131
llvm::SDValue::getNode
SDNode * getNode() const
get the SDNode which holds the desired result
Definition: SelectionDAGNodes.h:152
llvm::RISCVDAGToDAGISel::selectZExti32
bool selectZExti32(SDValue N, SDValue &Val)
Definition: RISCVISelDAGToDAG.cpp:1507
llvm::SelectionDAG::allnodes_end
allnodes_const_iterator allnodes_end() const
Definition: SelectionDAG.h:494
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
llvm::KnownBits::Zero
APInt Zero
Definition: KnownBits.h:24
C1
instcombine should handle this C2 when C1
Definition: README.txt:263
llvm::RISCVISD::DIVUW
@ DIVUW
Definition: RISCVISelLowering.h:55
llvm::MVT::bitsLT
bool bitsLT(MVT VT) const
Return true if this has less bits than VT.
Definition: MachineValueType.h:1128
llvm::SelectionDAG::getFrameIndex
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
Definition: SelectionDAG.cpp:1603
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::SelectionDAG::getVTList
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
Definition: SelectionDAG.cpp:8551
llvm::MipsISD::Lo
@ Lo
Definition: MipsISelLowering.h:79
llvm::MachineSDNode
An SDNode that represents everything that will be needed to construct a MachineInstr.
Definition: SelectionDAGNodes.h:2746
llvm::SelectionDAG::allnodes_begin
allnodes_const_iterator allnodes_begin() const
Definition: SelectionDAG.h:493
llvm::RISCVMatInt::generateInstSeq
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures)
Definition: RISCVMatInt.cpp:124
llvm::SDNode
Represents one node in the SelectionDAG.
Definition: SelectionDAGNodes.h:455
llvm::RISCVTargetMachine
Definition: RISCVTargetMachine.h:23
llvm::RISCVDAGToDAGISel::selectVSplat
bool selectVSplat(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1623
llvm::RISCVII::LMUL_8
@ LMUL_8
Definition: RISCVBaseInfo.h:99
llvm::MVT::Glue
@ Glue
Definition: MachineValueType.h:262
llvm::MemOp
Definition: TargetLowering.h:112
llvm::RISCVDAGToDAGISel
Definition: RISCVISelDAGToDAG.h:23
llvm::SelectionDAG::getMemBasePlusOffset
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
Definition: SelectionDAG.cpp:6350
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1403
llvm::tgtok::Bits
@ Bits
Definition: TGLexer.h:50
llvm::SelectionDAG::getStore
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
Definition: SelectionDAG.cpp:7512
llvm::RISCV::VLX_VSXPseudo
Definition: RISCVISelDAGToDAG.h:177
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::InlineAsm::Constraint_m
@ Constraint_m
Definition: InlineAsm.h:247
llvm::SelectionDAG::RemoveDeadNodes
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
Definition: SelectionDAG.cpp:830
llvm::RISCVTargetLowering::getRegClassIDForVecVT
static unsigned getRegClassIDForVecVT(MVT VT)
Definition: RISCVISelLowering.cpp:1264
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::RISCVDAGToDAGISel::selectVSSEG
void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided)
Definition: RISCVISelDAGToDAG.cpp:382
RISCVMatInt.h
llvm::BitmaskEnumDetail::Mask
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1567
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1678
llvm::RISCVDAGToDAGISel::SelectBaseAddr
bool SelectBaseAddr(SDValue Addr, SDValue &Base)
Definition: RISCVISelDAGToDAG.cpp:1451
KnownBits.h
llvm::MVT::isScalableVector
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
Definition: MachineValueType.h:373
llvm::SelectionDAG::getRegister
SDValue getRegister(unsigned Reg, EVT VT)
Definition: SelectionDAG.cpp:1985
llvm::MipsISD::Hi
@ Hi
Definition: MipsISelLowering.h:75
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::RISCV::VSSEGPseudo
Definition: RISCVISelDAGToDAG.h:141
llvm::RISCVII::LMUL_4
@ LMUL_4
Definition: RISCVBaseInfo.h:98
llvm::EVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: ValueTypes.h:363
llvm::RISCVDAGToDAGISel::selectShiftMask
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
Definition: RISCVISelDAGToDAG.cpp:1461
llvm::SelectionDAG::getTargetFrameIndex
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:688
llvm::SDValue::getValueType
EVT getValueType() const
Return the ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:1121
llvm::SelectionDAG
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:216
llvm::SelectionDAG::UpdateNodeOperands
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
Definition: SelectionDAG.cpp:8641
llvm::ISD::Constant
@ Constant
Definition: ISDOpcodes.h:76
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:724
llvm::User
Definition: User.h:44
llvm::ISD::SIGN_EXTEND_INREG
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:747
llvm::RISCVISD::SPLAT_VECTOR_I64
@ SPLAT_VECTOR_I64
Definition: RISCVISelLowering.h:137
createTuple
static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF, RISCVII::VLMUL LMUL)
Definition: RISCVISelDAGToDAG.cpp:196
llvm::SelectionDAG::getTargetLoweringInfo
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:443
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:35
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::isShiftedMask_64
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
Definition: MathExtras.h:485
llvm::TargetLowering
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Definition: TargetLowering.h:3189
llvm::MVT::getScalarSizeInBits
uint64_t getScalarSizeInBits() const
Definition: MachineValueType.h:1062
Y
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
llvm::SelectionDAG::MaskedValueIsZero
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
Definition: SelectionDAG.cpp:2463
llvm::RISCVTargetLowering::getSubregIndexByMVT
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index)
Definition: RISCVISelLowering.cpp:1241
llvm::RISCVDAGToDAGISel::SelectAddrFI
bool SelectAddrFI(SDValue Addr, SDValue &Base)
Definition: RISCVISelDAGToDAG.cpp:1443
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::RISCVSubtarget::getXLenVT
MVT getXLenVT() const
Definition: RISCVSubtarget.h:131
RISCVISelDAGToDAG.h
llvm::SelectionDAGISel::ReplaceNode
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
Definition: SelectionDAGISel.h:227
llvm::TypeSize::Fixed
static TypeSize Fixed(ScalarTy MinVal)
Definition: TypeSize.h:423
llvm::Log2_32
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:596
llvm::RISCVVType::decodeVSEW
static unsigned decodeVSEW(unsigned VSEW)
Definition: RISCVBaseInfo.h:346
RISCVMCTargetDesc.h
llvm::APInt::getHighBitsSet
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:281
llvm::RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand
bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
Definition: RISCVISelDAGToDAG.cpp:1425
llvm::SelectionDAG::getMemIntrinsicNode
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
Definition: SelectionDAG.cpp:7239
createM1Tuple
static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF)
Definition: RISCVISelDAGToDAG.cpp:171
llvm::RISCVSubtarget::hasStdExtZbb
bool hasStdExtZbb() const
Definition: RISCVSubtarget.h:112
llvm::ISD::AND
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:632
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::ISD::SPLAT_VECTOR
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:590
llvm::RISCVSubtarget::hasStdExtZbp
bool hasStdExtZbp() const
Definition: RISCVSubtarget.h:117
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::SDValue::getConstantOperandVal
uint64_t getConstantOperandVal(unsigned i) const
Definition: SelectionDAGNodes.h:1133
llvm::SPII::Load
@ Load
Definition: SparcInstrInfo.h:32
llvm::RISCVISD::DIVW
@ DIVW
Definition: RISCVISelLowering.h:54
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::SelectionDAG::getTargetGlobalAddress
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:683
llvm::RISCVISD::CLZW
@ CLZW
Definition: RISCVISelLowering.h:63
Operands
mir Rename Register Operands
Definition: MIRNamerPass.cpp:78
llvm::APInt::isSubsetOf
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition: APInt.h:1181
selectImm
static SDNode * selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm, const RISCVSubtarget &Subtarget)
Definition: RISCVISelDAGToDAG.cpp:128
createM2Tuple
static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF)
Definition: RISCVISelDAGToDAG.cpp:181
llvm::SelectionDAG::RemoveDeadNode
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
Definition: SelectionDAG.cpp:884
llvm::RISCV::VSEPseudo
Definition: RISCVISelDAGToDAG.h:169
llvm::RISCVDAGToDAGISel::selectVLOp
bool selectVLOp(SDValue N, SDValue &VL)
Definition: RISCVISelDAGToDAG.cpp:1612
llvm::isInt< 32 >
constexpr bool isInt< 32 >(int64_t x)
Definition: MathExtras.h:373
llvm::RISCVDAGToDAGISel::selectVSXSEG
void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
Definition: RISCVISelDAGToDAG.cpp:414
llvm::SelectionDAGISel::IsProfitableToFold
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
Definition: SelectionDAGISel.cpp:2206
llvm::isUInt< 16 >
constexpr bool isUInt< 16 >(uint64_t x)
Definition: MathExtras.h:408
selectVSplatSimmHelper
static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, ValidateFn ValidateImm)
Definition: RISCVISelDAGToDAG.cpp:1634
uint64_t
llvm::RISCVDAGToDAGISel::selectVSplatUimm5
bool selectVSplatUimm5(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1686
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:80
llvm::SelectionDAGISel::FuncInfo
std::unique_ptr< FunctionLoweringInfo > FuncInfo
Definition: SelectionDAGISel.h:43
llvm::MachinePointerInfo
This class contains a discriminated union of information about pointers in memory operands,...
Definition: MachineMemOperand.h:38
llvm::SelectionDAG::getCopyFromReg
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:761
llvm::SelectionDAGISel::IsLegalToFold
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOpt::Level OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
Definition: SelectionDAGISel.cpp:2214
llvm::SDNode::getOperand
const SDValue & getOperand(unsigned Num) const
Definition: SelectionDAGNodes.h:904
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::SelectionDAG::getNode
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
Definition: SelectionDAG.cpp:8316
llvm::RISCVISD::ROLW
@ ROLW
Definition: RISCVISelLowering.h:59
llvm::RISCVMachineFunctionInfo
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
Definition: RISCVMachineFunctionInfo.h:24
llvm::RISCVSubtarget
Definition: RISCVSubtarget.h:35
llvm::isUInt< 32 >
constexpr bool isUInt< 32 >(uint64_t x)
Definition: MathExtras.h:411
llvm::SDValue::getValue
SDValue getValue(unsigned R) const
Definition: SelectionDAGNodes.h:172
llvm::RISCVDAGToDAGISel::selectVLSEG
void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided)
Definition: RISCVISelDAGToDAG.cpp:250
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
createTupleImpl
static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned RegClassID, unsigned SubReg0)
Definition: RISCVISelDAGToDAG.cpp:153
llvm::MVT::Other
@ Other
Definition: MachineValueType.h:42
llvm::MVT::getSizeInBits
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
Definition: MachineValueType.h:860
llvm::SPII::Store
@ Store
Definition: SparcInstrInfo.h:33
llvm::SelectionDAGISel::CurDAG
SelectionDAG * CurDAG
Definition: SelectionDAGISel.h:47
llvm::RISCVDAGToDAGISel::hasAllWUsers
bool hasAllWUsers(SDNode *Node) const
Definition: RISCVISelDAGToDAG.h:63
llvm::SelectionDAG::getMachineNode
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
Definition: SelectionDAG.cpp:8989
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:31
llvm::RISCVISD::SRAW
@ SRAW
Definition: RISCVISelLowering.h:49
llvm::RISCVDAGToDAGISel::selectVSplatSimm5
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
Definition: RISCVISelDAGToDAG.cpp:1667
llvm::SelectionDAG::setNodeMemRefs
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
Definition: SelectionDAG.cpp:8757
llvm::RISCVSubtarget::hasStdExtZba
bool hasStdExtZba() const
Definition: RISCVSubtarget.h:111
llvm::MachinePointerInfo::getWithOffset
MachinePointerInfo getWithOffset(int64_t O) const
Definition: MachineMemOperand.h:80
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
llvm::MachineFunction
Definition: MachineFunction.h:230
llvm::RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
Definition: RISCVISelLowering.cpp:1276
llvm::RISCVISD::REMUW
@ REMUW
Definition: RISCVISelLowering.h:56
llvm::SelectionDAG::getTargetConstantPool
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=None, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:699
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::createRISCVISelDag
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM)
Definition: RISCVISelDAGToDAG.cpp:1883
llvm::MVT::i64
@ i64
Definition: MachineValueType.h:47
llvm::SelectionDAG::getTargetInsertSubreg
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
Definition: SelectionDAG.cpp:9117
llvm::RISCVISD::VMV_V_X_VL
@ VMV_V_X_VL
Definition: RISCVISelLowering.h:124
llvm::RISCVSubtarget::getRegisterInfo
const RISCVRegisterInfo * getRegisterInfo() const override
Definition: RISCVSubtarget.h:95
llvm::SDValue::getMachineOpcode
unsigned getMachineOpcode() const
Definition: SelectionDAGNodes.h:1153
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::RISCVII::LMUL_2
@ LMUL_2
Definition: RISCVBaseInfo.h:97
llvm::SelectionDAG::ReplaceAllUsesOfValueWith
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
Definition: SelectionDAG.cpp:9577
llvm::X86ISD::FLD
@ FLD
This instruction implements an extending load to FP stack slots.
Definition: X86ISelLowering.h:840
llvm::SDValue::getOperand
const SDValue & getOperand(unsigned i) const
Definition: SelectionDAGNodes.h:1129
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::SDValue::hasOneUse
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
Definition: SelectionDAGNodes.h:1165
llvm::SDValue::getSimpleValueType
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
Definition: SelectionDAGNodes.h:183
llvm::SDVTList
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Definition: SelectionDAGNodes.h:79
llvm::SignExtend64
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition: MathExtras.h:777
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:135
llvm::ISD::INTRINSIC_WO_CHAIN
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:184
llvm::MVT::getVectorElementCount
ElementCount getVectorElementCount() const
Definition: MachineValueType.h:846
llvm::RISCVISD::RORW
@ RORW
Definition: RISCVISelLowering.h:60
createM4Tuple
static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef< SDValue > Regs, unsigned NF)
Definition: RISCVISelDAGToDAG.cpp:190
llvm::ISD::FrameIndex
@ FrameIndex
Definition: ISDOpcodes.h:80
llvm::ISD::INSERT_SUBVECTOR
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
Definition: ISDOpcodes.h:535
llvm::HexagonISD::CP
@ CP
Definition: HexagonISelLowering.h:53
llvm::SelectionDAGISel::MF
MachineFunction * MF
Definition: SelectionDAGISel.h:45
Alignment.h
llvm::SelectionDAG::computeKnownBits
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
Definition: SelectionDAG.cpp:2757
llvm::KnownBits
Definition: KnownBits.h:23
llvm::RISCVISD::SRLW
@ SRLW
Definition: RISCVISelLowering.h:50
llvm::ISD::EXTRACT_SUBVECTOR
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition: ISDOpcodes.h:549
llvm::isNullConstant
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
Definition: SelectionDAG.cpp:9931
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:321
llvm::RISCV::VSXSEGPseudo
Definition: RISCVISelDAGToDAG.h:150
RISCVISelLowering.h
llvm::RISCVDAGToDAGISel::PostprocessISelDAG
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
Definition: RISCVISelDAGToDAG.cpp:110
llvm::ilist_iterator
Iterator for intrusive lists based on ilist_node.
Definition: ilist_iterator.h:57
MachineFrameInfo.h
llvm::SelectionDAG::getEntryNode
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:516
llvm::InlineAsm::Constraint_A
@ Constraint_A
Definition: InlineAsm.h:250
llvm::RISCVVType::encodeVTYPE
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
Definition: RISCVBaseInfo.cpp:110
llvm::SelectionDAG::getDataLayout
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:440
llvm::RISCVDAGToDAGISel::selectVLSEGFF
void selectVLSEGFF(SDNode *Node, bool IsMasked)
Definition: RISCVISelDAGToDAG.cpp:291
llvm::SelectionDAG::getTargetExtractSubreg
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
Definition: SelectionDAG.cpp:9107
llvm::SelectionDAGISel::ReplaceUses
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
Definition: SelectionDAGISel.h:206
llvm::MVT::i32
@ i32
Definition: MachineValueType.h:46
llvm::RISCVSubtarget::getXLen
unsigned getXLen() const
Definition: RISCVSubtarget.h:132
llvm::SDValue
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
Definition: SelectionDAGNodes.h:138
llvm::RISCVTargetLowering
Definition: RISCVISelLowering.h:294
llvm::XCoreISD::LMUL
@ LMUL
Definition: XCoreISelLowering.h:59
llvm::countLeadingZeros
unsigned countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1.
Definition: MathExtras.h:225
llvm::RISCVTargetLowering::getLMUL
static RISCVII::VLMUL getLMUL(MVT VT)
Definition: RISCVISelLowering.cpp:1197
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:410
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
llvm::RISCVISD::VFMV_V_F_VL
@ VFMV_V_F_VL
Definition: RISCVISelLowering.h:127
llvm::SDValue::isUndef
bool isUndef() const
Definition: SelectionDAGNodes.h:1157
llvm::RISCVII::LMUL_F8
@ LMUL_F8
Definition: RISCVBaseInfo.h:101
llvm::ISD::SHL
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:657
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1003
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:241
N
#define N
llvm::ISD::SRL
@ SRL
Definition: ISDOpcodes.h:659
RISCVMachineFunctionInfo.h
llvm::RISCVDAGToDAGISel::selectRVVSimm5
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
Definition: RISCVISelDAGToDAG.cpp:1704
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
llvm::RISCVII::LMUL_F4
@ LMUL_F4
Definition: RISCVBaseInfo.h:102
llvm::RISCVDAGToDAGISel::Select
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
Definition: RISCVISelDAGToDAG.cpp:452
llvm::RISCVII::VLMUL
VLMUL
Definition: RISCVBaseInfo.h:95
llvm::MVT::Untyped
@ Untyped
Definition: MachineValueType.h:266
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::SDValue::getOpcode
unsigned getOpcode() const
Definition: SelectionDAGNodes.h:1117
llvm::SelectionDAG::getTargetConstant
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:637
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::ISD::INTRINSIC_W_CHAIN
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:192
llvm::SelectionDAG::DeleteNode
void DeleteNode(SDNode *N)
Remove the specified node from the system.
Definition: SelectionDAG.cpp:895
llvm::SelectionDAG::getMachineFunction
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:437
llvm::SelectionDAG::ComputeNumSignBits
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
Definition: SelectionDAG.cpp:3686
llvm::isMask_64
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
Definition: MathExtras.h:473
llvm::RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL
@ SPLAT_VECTOR_SPLIT_I64_VL
Definition: RISCVISelLowering.h:140
raw_ostream.h
llvm::SDValue::isMachineOpcode
bool isMachineOpcode() const
Definition: SelectionDAGNodes.h:1149
llvm::RISCV::VLEPseudo
Definition: RISCVISelDAGToDAG.h:160
llvm::RISCVDAGToDAGISel::hasAllHUsers
bool hasAllHUsers(SDNode *Node) const
Definition: RISCVISelDAGToDAG.h:62
ValidateFn
bool(*)(int64_t) ValidateFn
Definition: RISCVISelDAGToDAG.cpp:1632
llvm::RISCVISD::CTZW
@ CTZW
Definition: RISCVISelLowering.h:64
llvm::RISCVDAGToDAGISel::hasAllNBitUsers
bool hasAllNBitUsers(SDNode *Node, unsigned Bits) const
Definition: RISCVISelDAGToDAG.cpp:1536
Debug.h
llvm::RISCVDAGToDAGISel::selectSExti32
bool selectSExti32(SDValue N, SDValue &Val)
Definition: RISCVISelDAGToDAG.cpp:1492
llvm::TargetLoweringBase::getPointerTy
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
Definition: TargetLowering.h:346
llvm::ISD::TokenFactor
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
llvm::RISCVII::LMUL_F2
@ LMUL_F2
Definition: RISCVBaseInfo.h:103
llvm::sampleprof::Base
@ Base
Definition: Discriminator.h:58
llvm::RISCVSubtarget::hasStdExtV
bool hasStdExtV() const
Definition: RISCVSubtarget.h:122