AArch64ISelDAGToDAG.cpp (LLVM 18.0.0git)
1//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines an instruction selector for the AArch64 target.
10//
11//===----------------------------------------------------------------------===//
12
13#include "AArch64MachineFunctionInfo.h"
14#include "AArch64TargetMachine.h"
15#include "MCTargetDesc/AArch64AddressingModes.h"
16#include "llvm/ADT/APSInt.h"
17#include "llvm/CodeGen/ISDOpcodes.h"
18#include "llvm/CodeGen/SelectionDAGISel.h"
19#include "llvm/IR/Function.h" // To access function attributes.
20#include "llvm/IR/GlobalValue.h"
21#include "llvm/IR/Intrinsics.h"
22#include "llvm/IR/IntrinsicsAArch64.h"
23#include "llvm/Support/Debug.h"
24#include "llvm/Support/ErrorHandling.h"
25#include "llvm/Support/KnownBits.h"
26#include "llvm/Support/MathExtras.h"
27#include "llvm/Support/raw_ostream.h"
28
29using namespace llvm;
30
31#define DEBUG_TYPE "aarch64-isel"
32#define PASS_NAME "AArch64 Instruction Selection"
33
34//===--------------------------------------------------------------------===//
35/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
36/// instructions for SelectionDAG operations.
37///
38namespace {
39
40class AArch64DAGToDAGISel : public SelectionDAGISel {
41
42 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
43 /// make the right decision when generating code for different targets.
44 const AArch64Subtarget *Subtarget;
45
46public:
47 static char ID;
48
49 AArch64DAGToDAGISel() = delete;
50
51 explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
52 CodeGenOptLevel OptLevel)
53 : SelectionDAGISel(ID, tm, OptLevel), Subtarget(nullptr) {}
54
55 bool runOnMachineFunction(MachineFunction &MF) override {
56 Subtarget = &MF.getSubtarget<AArch64Subtarget>();
 57 return SelectionDAGISel::runOnMachineFunction(MF);
 58 }
59
60 void Select(SDNode *Node) override;
61
62 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
63 /// inline asm expressions.
 64 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
 65 InlineAsm::ConstraintCode ConstraintID,
66 std::vector<SDValue> &OutOps) override;
67
68 template <signed Low, signed High, signed Scale>
69 bool SelectRDVLImm(SDValue N, SDValue &Imm);
70
71 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
72 bool SelectArithUXTXRegister(SDValue N, SDValue &Reg, SDValue &Shift);
73 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
74 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
75 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
76 return SelectShiftedRegister(N, false, Reg, Shift);
77 }
78 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
79 return SelectShiftedRegister(N, true, Reg, Shift);
80 }
81 bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
82 return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
83 }
84 bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
85 return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
86 }
87 bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
88 return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
89 }
90 bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
91 return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
92 }
93 bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
94 return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
95 }
96 bool SelectAddrModeIndexedS9S128(SDValue N, SDValue &Base, SDValue &OffImm) {
97 return SelectAddrModeIndexedBitWidth(N, true, 9, 16, Base, OffImm);
98 }
99 bool SelectAddrModeIndexedU6S128(SDValue N, SDValue &Base, SDValue &OffImm) {
100 return SelectAddrModeIndexedBitWidth(N, false, 6, 16, Base, OffImm);
101 }
102 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
103 return SelectAddrModeIndexed(N, 1, Base, OffImm);
104 }
105 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
106 return SelectAddrModeIndexed(N, 2, Base, OffImm);
107 }
108 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
109 return SelectAddrModeIndexed(N, 4, Base, OffImm);
110 }
111 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
112 return SelectAddrModeIndexed(N, 8, Base, OffImm);
113 }
114 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
115 return SelectAddrModeIndexed(N, 16, Base, OffImm);
116 }
117 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
118 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
119 }
120 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
121 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
122 }
123 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
124 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
125 }
126 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
127 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
128 }
129 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
130 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
131 }
132 template <unsigned Size, unsigned Max>
133 bool SelectAddrModeIndexedUImm(SDValue N, SDValue &Base, SDValue &OffImm) {
134 // Test if there is an appropriate addressing mode and check if the
135 // immediate fits.
136 bool Found = SelectAddrModeIndexed(N, Size, Base, OffImm);
137 if (Found) {
138 if (auto *CI = dyn_cast<ConstantSDNode>(OffImm)) {
139 int64_t C = CI->getSExtValue();
140 if (C <= Max)
141 return true;
142 }
143 }
144
145 // Otherwise, base only, materialize address in register.
146 Base = N;
147 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
148 return true;
149 }
150
151 template<int Width>
152 bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
153 SDValue &SignExtend, SDValue &DoShift) {
154 return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
155 }
156
157 template<int Width>
158 bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
159 SDValue &SignExtend, SDValue &DoShift) {
160 return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
161 }
162
163 bool SelectExtractHigh(SDValue N, SDValue &Res) {
164 if (Subtarget->isLittleEndian() && N->getOpcode() == ISD::BITCAST)
165 N = N->getOperand(0);
166 if (N->getOpcode() != ISD::EXTRACT_SUBVECTOR ||
167 !isa<ConstantSDNode>(N->getOperand(1)))
168 return false;
169 EVT VT = N->getValueType(0);
170 EVT LVT = N->getOperand(0).getValueType();
171 unsigned Index = N->getConstantOperandVal(1);
172 if (!VT.is64BitVector() || !LVT.is128BitVector() ||
 173 Index != VT.getVectorNumElements())
 174 return false;
175 Res = N->getOperand(0);
176 return true;
177 }
178
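 // Matches a vector logical shift right of an add of the rounding constant,
 // i.e. VLSHR(ADD(X, splat(1 << (ShtAmt - 1))), ShtAmt), so the combination
 // can be selected as a rounding shift (e.g. a URSHR/RSHRN-style pattern).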
179 bool SelectRoundingVLShr(SDValue N, SDValue &Res1, SDValue &Res2) {
180 if (N.getOpcode() != AArch64ISD::VLSHR)
181 return false;
182 SDValue Op = N->getOperand(0);
183 EVT VT = Op.getValueType();
184 unsigned ShtAmt = N->getConstantOperandVal(1);
185 if (ShtAmt > VT.getScalarSizeInBits() / 2 || Op.getOpcode() != ISD::ADD)
186 return false;
187
188 APInt Imm;
189 if (Op.getOperand(1).getOpcode() == AArch64ISD::MOVIshift)
190 Imm = APInt(VT.getScalarSizeInBits(),
191 Op.getOperand(1).getConstantOperandVal(0)
192 << Op.getOperand(1).getConstantOperandVal(1));
193 else if (Op.getOperand(1).getOpcode() == AArch64ISD::DUP &&
194 isa<ConstantSDNode>(Op.getOperand(1).getOperand(0)))
195 Imm = APInt(VT.getScalarSizeInBits(),
196 Op.getOperand(1).getConstantOperandVal(0));
197 else
198 return false;
199
200 if (Imm != 1ULL << (ShtAmt - 1))
201 return false;
202
203 Res1 = Op.getOperand(0);
204 Res2 = CurDAG->getTargetConstant(ShtAmt, SDLoc(N), MVT::i32);
205 return true;
206 }
207
208 bool SelectDupZeroOrUndef(SDValue N) {
209 switch(N->getOpcode()) {
210 case ISD::UNDEF:
211 return true;
212 case AArch64ISD::DUP:
213 case ISD::SPLAT_VECTOR: {
214 auto Opnd0 = N->getOperand(0);
215 if (isNullConstant(Opnd0))
216 return true;
217 if (isNullFPConstant(Opnd0))
218 return true;
219 break;
220 }
221 default:
222 break;
223 }
224
225 return false;
226 }
227
228 bool SelectDupZero(SDValue N) {
229 switch(N->getOpcode()) {
230 case AArch64ISD::DUP:
231 case ISD::SPLAT_VECTOR: {
232 auto Opnd0 = N->getOperand(0);
233 if (isNullConstant(Opnd0))
234 return true;
235 if (isNullFPConstant(Opnd0))
236 return true;
237 break;
238 }
239 }
240
241 return false;
242 }
243
244 bool SelectDupNegativeZero(SDValue N) {
245 switch(N->getOpcode()) {
246 case AArch64ISD::DUP:
247 case ISD::SPLAT_VECTOR: {
248 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
249 return Const && Const->isZero() && Const->isNegative();
250 }
251 }
252
253 return false;
254 }
255
256 template<MVT::SimpleValueType VT>
257 bool SelectSVEAddSubImm(SDValue N, SDValue &Imm, SDValue &Shift) {
258 return SelectSVEAddSubImm(N, VT, Imm, Shift);
259 }
260
261 template <MVT::SimpleValueType VT>
262 bool SelectSVECpyDupImm(SDValue N, SDValue &Imm, SDValue &Shift) {
263 return SelectSVECpyDupImm(N, VT, Imm, Shift);
264 }
265
266 template <MVT::SimpleValueType VT, bool Invert = false>
267 bool SelectSVELogicalImm(SDValue N, SDValue &Imm) {
268 return SelectSVELogicalImm(N, VT, Imm, Invert);
269 }
270
271 template <MVT::SimpleValueType VT>
272 bool SelectSVEArithImm(SDValue N, SDValue &Imm) {
273 return SelectSVEArithImm(N, VT, Imm);
274 }
275
276 template <unsigned Low, unsigned High, bool AllowSaturation = false>
277 bool SelectSVEShiftImm(SDValue N, SDValue &Imm) {
278 return SelectSVEShiftImm(N, Low, High, AllowSaturation, Imm);
279 }
280
281 bool SelectSVEShiftSplatImmR(SDValue N, SDValue &Imm) {
282 if (N->getOpcode() != ISD::SPLAT_VECTOR)
283 return false;
284
285 EVT EltVT = N->getValueType(0).getVectorElementType();
286 return SelectSVEShiftImm(N->getOperand(0), /* Low */ 1,
287 /* High */ EltVT.getFixedSizeInBits(),
288 /* AllowSaturation */ true, Imm);
289 }
290
291 // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
292 template<signed Min, signed Max, signed Scale, bool Shift>
293 bool SelectCntImm(SDValue N, SDValue &Imm) {
294 if (!isa<ConstantSDNode>(N))
295 return false;
296
297 int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
298 if (Shift)
299 MulImm = 1LL << MulImm;
300
301 if ((MulImm % std::abs(Scale)) != 0)
302 return false;
303
304 MulImm /= Scale;
305 if ((MulImm >= Min) && (MulImm <= Max)) {
306 Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
307 return true;
308 }
309
310 return false;
311 }
312
313 template <signed Max, signed Scale>
314 bool SelectEXTImm(SDValue N, SDValue &Imm) {
315 if (!isa<ConstantSDNode>(N))
316 return false;
317
318 int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
319
320 if (MulImm >= 0 && MulImm <= Max) {
321 MulImm *= Scale;
322 Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
323 return true;
324 }
325
326 return false;
327 }
328
329 template <unsigned BaseReg> bool ImmToTile(SDValue N, SDValue &Imm) {
330 if (auto *CI = dyn_cast<ConstantSDNode>(N)) {
331 uint64_t C = CI->getZExtValue();
332 Imm = CurDAG->getRegister(BaseReg + C, MVT::Other);
333 return true;
334 }
335 return false;
336 }
337
338 /// Form sequences of consecutive 64/128-bit registers for use in NEON
339 /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
340 /// between 1 and 4 elements. If it contains a single element that is returned
341 /// unchanged; otherwise a REG_SEQUENCE value is returned.
 342 SDValue createDTuple(ArrayRef<SDValue> Vecs);
 343 SDValue createQTuple(ArrayRef<SDValue> Vecs);
 344 // Form a sequence of SVE registers for instructions using list of vectors,
345 // e.g. structured loads and stores (ldN, stN).
346 SDValue createZTuple(ArrayRef<SDValue> Vecs);
347
348 // Similar to above, except the register must start at a multiple of the
349 // tuple, e.g. z2 for a 2-tuple, or z8 for a 4-tuple.
350 SDValue createZMulTuple(ArrayRef<SDValue> Regs);
351
352 /// Generic helper for the createDTuple/createQTuple
353 /// functions. Those should almost always be called instead.
354 SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
355 const unsigned SubRegs[]);
356
357 void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
358
359 bool tryIndexedLoad(SDNode *N);
360
361 bool trySelectStackSlotTagP(SDNode *N);
362 void SelectTagP(SDNode *N);
363
364 void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
365 unsigned SubRegIdx);
366 void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
367 unsigned SubRegIdx);
368 void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
369 void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
370 void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, unsigned Scale,
371 unsigned Opc_rr, unsigned Opc_ri,
372 bool IsIntr = false);
373 void SelectContiguousMultiVectorLoad(SDNode *N, unsigned NumVecs,
374 unsigned Scale, unsigned Opc_ri,
375 unsigned Opc_rr);
376 void SelectDestructiveMultiIntrinsic(SDNode *N, unsigned NumVecs,
377 bool IsZmMulti, unsigned Opcode,
378 bool HasPred = false);
379 void SelectPExtPair(SDNode *N, unsigned Opc);
380 void SelectWhilePair(SDNode *N, unsigned Opc);
381 void SelectCVTIntrinsic(SDNode *N, unsigned NumVecs, unsigned Opcode);
382 void SelectClamp(SDNode *N, unsigned NumVecs, unsigned Opcode);
383 void SelectUnaryMultiIntrinsic(SDNode *N, unsigned NumOutVecs,
384 bool IsTupleInput, unsigned Opc);
385 void SelectFrintFromVT(SDNode *N, unsigned NumVecs, unsigned Opcode);
386
387 template <unsigned MaxIdx, unsigned Scale>
388 void SelectMultiVectorMove(SDNode *N, unsigned NumVecs, unsigned BaseReg,
389 unsigned Op);
390
391 bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
392 /// SVE Reg+Imm addressing mode.
393 template <int64_t Min, int64_t Max>
394 bool SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, SDValue &Base,
395 SDValue &OffImm);
396 /// SVE Reg+Reg address mode.
397 template <unsigned Scale>
398 bool SelectSVERegRegAddrMode(SDValue N, SDValue &Base, SDValue &Offset) {
399 return SelectSVERegRegAddrMode(N, Scale, Base, Offset);
400 }
401
402 template <unsigned MaxIdx, unsigned Scale>
403 bool SelectSMETileSlice(SDValue N, SDValue &Vector, SDValue &Offset) {
404 return SelectSMETileSlice(N, MaxIdx, Vector, Offset, Scale);
405 }
406
407 void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
408 void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
409 void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
410 void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
411 void SelectPredicatedStore(SDNode *N, unsigned NumVecs, unsigned Scale,
412 unsigned Opc_rr, unsigned Opc_ri);
413 std::tuple<unsigned, SDValue, SDValue>
414 findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr, unsigned Opc_ri,
415 const SDValue &OldBase, const SDValue &OldOffset,
416 unsigned Scale);
417
418 bool tryBitfieldExtractOp(SDNode *N);
419 bool tryBitfieldExtractOpFromSExt(SDNode *N);
420 bool tryBitfieldInsertOp(SDNode *N);
421 bool tryBitfieldInsertInZeroOp(SDNode *N);
422 bool tryShiftAmountMod(SDNode *N);
423
424 bool tryReadRegister(SDNode *N);
425 bool tryWriteRegister(SDNode *N);
426
427 bool trySelectCastFixedLengthToScalableVector(SDNode *N);
428 bool trySelectCastScalableToFixedLengthVector(SDNode *N);
429
430// Include the pieces autogenerated from the target description.
431#include "AArch64GenDAGISel.inc"
432
433private:
434 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
435 SDValue &Shift);
436 bool SelectShiftedRegisterFromAnd(SDValue N, SDValue &Reg, SDValue &Shift);
437 bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
438 SDValue &OffImm) {
439 return SelectAddrModeIndexedBitWidth(N, true, 7, Size, Base, OffImm);
440 }
441 bool SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm, unsigned BW,
442 unsigned Size, SDValue &Base,
443 SDValue &OffImm);
444 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
445 SDValue &OffImm);
446 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
447 SDValue &OffImm);
448 bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
449 SDValue &Offset, SDValue &SignExtend,
450 SDValue &DoShift);
451 bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
452 SDValue &Offset, SDValue &SignExtend,
453 SDValue &DoShift);
454 bool isWorthFoldingALU(SDValue V, bool LSL = false) const;
455 bool isWorthFoldingAddr(SDValue V) const;
456 bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
457 SDValue &Offset, SDValue &SignExtend);
458
459 template<unsigned RegWidth>
460 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
461 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
462 }
463
464 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
465
466 template<unsigned RegWidth>
467 bool SelectCVTFixedPosRecipOperand(SDValue N, SDValue &FixedPos) {
468 return SelectCVTFixedPosRecipOperand(N, FixedPos, RegWidth);
469 }
470
471 bool SelectCVTFixedPosRecipOperand(SDValue N, SDValue &FixedPos,
472 unsigned Width);
473
474 bool SelectCMP_SWAP(SDNode *N);
475
476 bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
477 bool SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
478 bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm, bool Invert);
479
480 bool SelectSVESignedArithImm(SDValue N, SDValue &Imm);
481 bool SelectSVEShiftImm(SDValue N, uint64_t Low, uint64_t High,
482 bool AllowSaturation, SDValue &Imm);
483
484 bool SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm);
485 bool SelectSVERegRegAddrMode(SDValue N, unsigned Scale, SDValue &Base,
486 SDValue &Offset);
487 bool SelectSMETileSlice(SDValue N, unsigned MaxSize, SDValue &Vector,
488 SDValue &Offset, unsigned Scale = 1);
489
490 bool SelectAllActivePredicate(SDValue N);
491 bool SelectAnyPredicate(SDValue N);
492};
493} // end anonymous namespace
494
495char AArch64DAGToDAGISel::ID = 0;
496
497INITIALIZE_PASS(AArch64DAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)
498
499/// isIntImmediate - This method tests to see if the node is a constant
500/// operand. If so, Imm will receive the value.
501static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
502 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
503 Imm = C->getZExtValue();
504 return true;
505 }
506 return false;
507}
508
509// isIntImmediate - This method tests to see if N is a constant operand.
510// If so, Imm will receive the value.
511static bool isIntImmediate(SDValue N, uint64_t &Imm) {
512 return isIntImmediate(N.getNode(), Imm);
513}
514
515// isOpcWithIntImmediate - This method tests to see if the node is a specific
516// opcode and that it has an immediate integer right operand.
517// If so, Imm will receive the value.
518static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
519 uint64_t &Imm) {
520 return N->getOpcode() == Opc &&
521 isIntImmediate(N->getOperand(1).getNode(), Imm);
522}
523
524// isIntImmediateEq - This method tests to see if N is a constant operand that
525// is equivalent to 'ImmExpected'.
526#ifndef NDEBUG
527static bool isIntImmediateEq(SDValue N, const uint64_t ImmExpected) {
528 uint64_t Imm;
529 if (!isIntImmediate(N.getNode(), Imm))
530 return false;
531 return Imm == ImmExpected;
532}
533#endif
534
535bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
536 const SDValue &Op, const InlineAsm::ConstraintCode ConstraintID,
537 std::vector<SDValue> &OutOps) {
538 switch(ConstraintID) {
539 default:
540 llvm_unreachable("Unexpected asm memory constraint");
541 case InlineAsm::ConstraintCode::m:
542 case InlineAsm::ConstraintCode::o:
543 case InlineAsm::ConstraintCode::Q:
544 // We need to make sure that this one operand does not end up in XZR, thus
545 // require the address to be in a PointerRegClass register.
546 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
547 const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF);
548 SDLoc dl(Op);
549 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i64);
550 SDValue NewOp =
551 SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
552 dl, Op.getValueType(),
553 Op, RC), 0);
554 OutOps.push_back(NewOp);
555 return false;
556 }
557 return true;
558}
559
560/// SelectArithImmed - Select an immediate value that can be represented as
561/// a 12-bit value shifted left by either 0 or 12. If so, return true with
562/// Val set to the 12-bit value and Shift set to the shifter operand.
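/// For example, 0xABC selects as Val = 0xABC with "LSL #0", and 0x456000
/// (low 12 bits clear) selects as Val = 0x456 with "LSL #12"; constants that
/// need more than 24 bits are rejected.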
563bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
564 SDValue &Shift) {
565 // This function is called from the addsub_shifted_imm ComplexPattern,
566 // which lists [imm] as the list of opcode it's interested in, however
567 // we still need to check whether the operand is actually an immediate
568 // here because the ComplexPattern opcode list is only used in
569 // root-level opcode matching.
570 if (!isa<ConstantSDNode>(N.getNode()))
571 return false;
572
573 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
574 unsigned ShiftAmt;
575
576 if (Immed >> 12 == 0) {
577 ShiftAmt = 0;
578 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
579 ShiftAmt = 12;
580 Immed = Immed >> 12;
581 } else
582 return false;
583
584 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
585 SDLoc dl(N);
586 Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
587 Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
588 return true;
589}
590
591/// SelectNegArithImmed - As above, but negates the value before trying to
592/// select it.
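/// For example, an add of the constant -5 can instead be selected as a sub of
/// 5, since 5 fits the 12-bit immediate form while -5 does not.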
593bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
594 SDValue &Shift) {
595 // This function is called from the addsub_shifted_imm ComplexPattern,
596 // which lists [imm] as the list of opcode it's interested in, however
597 // we still need to check whether the operand is actually an immediate
598 // here because the ComplexPattern opcode list is only used in
599 // root-level opcode matching.
600 if (!isa<ConstantSDNode>(N.getNode()))
601 return false;
602
603 // The immediate operand must be a 24-bit zero-extended immediate.
604 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
605
606 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
607 // have the opposite effect on the C flag, so this pattern mustn't match under
608 // those circumstances.
609 if (Immed == 0)
610 return false;
611
612 if (N.getValueType() == MVT::i32)
613 Immed = ~((uint32_t)Immed) + 1;
614 else
615 Immed = ~Immed + 1ULL;
616 if (Immed & 0xFFFFFFFFFF000000ULL)
617 return false;
618
619 Immed &= 0xFFFFFFULL;
620 return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
621 Shift);
622}
623
624/// getShiftTypeForNode - Translate a shift node to the corresponding
625/// ShiftType value.
626static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
627 switch (N.getOpcode()) {
628 default:
629 return AArch64_AM::InvalidShiftExtend;
630 case ISD::SHL:
631 return AArch64_AM::LSL;
632 case ISD::SRL:
633 return AArch64_AM::LSR;
634 case ISD::SRA:
635 return AArch64_AM::ASR;
636 case ISD::ROTR:
637 return AArch64_AM::ROR;
638 }
639}
640
641/// Determine whether it is worth it to fold SHL into the addressing
642/// mode.
643static bool isWorthFoldingSHL(SDValue V) {
644 assert(V.getOpcode() == ISD::SHL && "invalid opcode");
645 // It is worth folding logical shift of up to three places.
646 auto *CSD = dyn_cast<ConstantSDNode>(V.getOperand(1));
647 if (!CSD)
648 return false;
649 unsigned ShiftVal = CSD->getZExtValue();
650 if (ShiftVal > 3)
651 return false;
652
653 // Check if this particular node is reused in any non-memory related
654 // operation. If yes, do not try to fold this node into the address
655 // computation, since the computation will be kept.
656 const SDNode *Node = V.getNode();
657 for (SDNode *UI : Node->uses())
658 if (!isa<MemSDNode>(*UI))
659 for (SDNode *UII : UI->uses())
660 if (!isa<MemSDNode>(*UII))
661 return false;
662 return true;
663}
664
665/// Determine whether it is worth folding V into an extended register
666/// addressing mode.
667bool AArch64DAGToDAGISel::isWorthFoldingAddr(SDValue V) const {
668 // Trivial if we are optimizing for code size or if there is only
669 // one use of the value.
670 if (CurDAG->shouldOptForSize() || V.hasOneUse())
671 return true;
672 // If a subtarget has a fastpath LSL we can fold a logical shift into
673 // the addressing mode and save a cycle.
674 if (Subtarget->hasAddrLSLFast() && V.getOpcode() == ISD::SHL &&
675 V.getConstantOperandVal(1) <= 3)
676 return true;
677 if (Subtarget->hasAddrLSLFast() && V.getOpcode() == ISD::ADD) {
678 const SDValue LHS = V.getOperand(0);
679 const SDValue RHS = V.getOperand(1);
680 if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(LHS))
681 return true;
682 if (RHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(RHS))
683 return true;
684 }
685
686 // It hurts otherwise, since the value will be reused.
687 return false;
688}
689
690/// and (shl/srl/sra, x, c), mask --> shl (srl/sra, x, c1), c2
691/// to select more shifted register
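/// For example, with an i32 value x, "(x >> 4) & 0xFFFFFFF0" is equivalent to
/// "(x >> 8) << 4", i.e. a UBFM performing LSR #8 whose result is then used
/// as a shifted-register operand with "LSL #4".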
692bool AArch64DAGToDAGISel::SelectShiftedRegisterFromAnd(SDValue N, SDValue &Reg,
693 SDValue &Shift) {
694 EVT VT = N.getValueType();
695 if (VT != MVT::i32 && VT != MVT::i64)
696 return false;
697
698 if (N->getOpcode() != ISD::AND || !N->hasOneUse())
699 return false;
700 SDValue LHS = N.getOperand(0);
701 if (!LHS->hasOneUse())
702 return false;
703
704 unsigned LHSOpcode = LHS->getOpcode();
705 if (LHSOpcode != ISD::SHL && LHSOpcode != ISD::SRL && LHSOpcode != ISD::SRA)
706 return false;
707
708 ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
709 if (!ShiftAmtNode)
710 return false;
711
712 uint64_t ShiftAmtC = ShiftAmtNode->getZExtValue();
713 ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N.getOperand(1));
714 if (!RHSC)
715 return false;
716
717 APInt AndMask = RHSC->getAPIntValue();
718 unsigned LowZBits, MaskLen;
719 if (!AndMask.isShiftedMask(LowZBits, MaskLen))
720 return false;
721
722 unsigned BitWidth = N.getValueSizeInBits();
723 SDLoc DL(LHS);
724 uint64_t NewShiftC;
725 unsigned NewShiftOp;
726 if (LHSOpcode == ISD::SHL) {
727 // LowZBits <= ShiftAmtC will fall into isBitfieldPositioningOp
728 // BitWidth != LowZBits + MaskLen doesn't match the pattern
729 if (LowZBits <= ShiftAmtC || (BitWidth != LowZBits + MaskLen))
730 return false;
731
732 NewShiftC = LowZBits - ShiftAmtC;
733 NewShiftOp = VT == MVT::i64 ? AArch64::UBFMXri : AArch64::UBFMWri;
734 } else {
735 if (LowZBits == 0)
736 return false;
737
738 // NewShiftC >= BitWidth will fall into isBitfieldExtractOp
739 NewShiftC = LowZBits + ShiftAmtC;
740 if (NewShiftC >= BitWidth)
741 return false;
742
743 // SRA need all high bits
744 if (LHSOpcode == ISD::SRA && (BitWidth != (LowZBits + MaskLen)))
745 return false;
746
747 // SRL high bits can be 0 or 1
748 if (LHSOpcode == ISD::SRL && (BitWidth > (NewShiftC + MaskLen)))
749 return false;
750
751 if (LHSOpcode == ISD::SRL)
752 NewShiftOp = VT == MVT::i64 ? AArch64::UBFMXri : AArch64::UBFMWri;
753 else
754 NewShiftOp = VT == MVT::i64 ? AArch64::SBFMXri : AArch64::SBFMWri;
755 }
756
757 assert(NewShiftC < BitWidth && "Invalid shift amount");
758 SDValue NewShiftAmt = CurDAG->getTargetConstant(NewShiftC, DL, VT);
759 SDValue BitWidthMinus1 = CurDAG->getTargetConstant(BitWidth - 1, DL, VT);
760 Reg = SDValue(CurDAG->getMachineNode(NewShiftOp, DL, VT, LHS->getOperand(0),
761 NewShiftAmt, BitWidthMinus1),
762 0);
763 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, LowZBits);
764 Shift = CurDAG->getTargetConstant(ShVal, DL, MVT::i32);
765 return true;
766}
767
768/// getExtendTypeForNode - Translate an extend node to the corresponding
769/// ExtendType value.
770static AArch64_AM::ShiftExtendType
771getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
772 if (N.getOpcode() == ISD::SIGN_EXTEND ||
773 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
774 EVT SrcVT;
775 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
776 SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
777 else
778 SrcVT = N.getOperand(0).getValueType();
779
780 if (!IsLoadStore && SrcVT == MVT::i8)
781 return AArch64_AM::SXTB;
782 else if (!IsLoadStore && SrcVT == MVT::i16)
783 return AArch64_AM::SXTH;
784 else if (SrcVT == MVT::i32)
785 return AArch64_AM::SXTW;
786 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
787
 788 return AArch64_AM::InvalidShiftExtend;
 789 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
790 N.getOpcode() == ISD::ANY_EXTEND) {
791 EVT SrcVT = N.getOperand(0).getValueType();
792 if (!IsLoadStore && SrcVT == MVT::i8)
793 return AArch64_AM::UXTB;
794 else if (!IsLoadStore && SrcVT == MVT::i16)
795 return AArch64_AM::UXTH;
796 else if (SrcVT == MVT::i32)
797 return AArch64_AM::UXTW;
798 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
799
 800 return AArch64_AM::InvalidShiftExtend;
 801 } else if (N.getOpcode() == ISD::AND) {
802 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
803 if (!CSD)
 804 return AArch64_AM::InvalidShiftExtend;
 805 uint64_t AndMask = CSD->getZExtValue();
806
807 switch (AndMask) {
808 default:
 809 return AArch64_AM::InvalidShiftExtend;
 810 case 0xFF:
811 return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
812 case 0xFFFF:
813 return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
814 case 0xFFFFFFFF:
815 return AArch64_AM::UXTW;
816 }
817 }
818
 819 return AArch64_AM::InvalidShiftExtend;
 820}
821
822/// Determine whether it is worth folding V into an extended register of an
823/// Add/Sub. LSL means we are folding into an `add w0, w1, w2, lsl #N`
824/// instruction, and the shift should be treated as worth folding even if it
825/// has multiple uses.
826bool AArch64DAGToDAGISel::isWorthFoldingALU(SDValue V, bool LSL) const {
827 // Trivial if we are optimizing for code size or if there is only
828 // one use of the value.
829 if (CurDAG->shouldOptForSize() || V.hasOneUse())
830 return true;
831
832 // If a subtarget has a fastpath LSL we can fold a logical shift into
833 // the add/sub and save a cycle.
834 if (LSL && Subtarget->hasALULSLFast() && V.getOpcode() == ISD::SHL &&
835 V.getConstantOperandVal(1) <= 4 &&
 836 getExtendTypeForNode(V.getOperand(0)) == AArch64_AM::InvalidShiftExtend)
 837 return true;
838
839 // It hurts otherwise, since the value will be reused.
840 return false;
841}
842
843/// SelectShiftedRegister - Select a "shifted register" operand. If the value
844/// is not shifted, set the Shift operand to default of "LSL 0". The logical
845/// instructions allow the shifted register to be rotated, but the arithmetic
846/// instructions do not. The AllowROR parameter specifies whether ROR is
847/// supported.
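/// For example, "(add a, (shl b, 2))" can fold the shift and select
/// "ADD a, b, LSL #2", while "(or a, (rotr b, 8))" can become
/// "ORR a, b, ROR #8" because ORR is a logical instruction.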
848bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
849 SDValue &Reg, SDValue &Shift) {
850 if (SelectShiftedRegisterFromAnd(N, Reg, Shift))
851 return true;
852
 853 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
 854 if (ShType == AArch64_AM::InvalidShiftExtend)
855 return false;
856 if (!AllowROR && ShType == AArch64_AM::ROR)
857 return false;
858
859 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
860 unsigned BitSize = N.getValueSizeInBits();
861 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
862 unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
863
864 Reg = N.getOperand(0);
865 Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
866 return isWorthFoldingALU(N, true);
867 }
868
869 return false;
870}
871
872/// Instructions that accept extend modifiers like UXTW expect the register
873/// being extended to be a GPR32, but the incoming DAG might be acting on a
874/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
875/// this is the case.
876static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
877 if (N.getValueType() == MVT::i32)
878 return N;
879
880 SDLoc dl(N);
881 return CurDAG->getTargetExtractSubreg(AArch64::sub_32, dl, MVT::i32, N);
882}
883
884// Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
885template<signed Low, signed High, signed Scale>
886bool AArch64DAGToDAGISel::SelectRDVLImm(SDValue N, SDValue &Imm) {
887 if (!isa<ConstantSDNode>(N))
888 return false;
889
890 int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
891 if ((MulImm % std::abs(Scale)) == 0) {
892 int64_t RDVLImm = MulImm / Scale;
893 if ((RDVLImm >= Low) && (RDVLImm <= High)) {
894 Imm = CurDAG->getTargetConstant(RDVLImm, SDLoc(N), MVT::i32);
895 return true;
896 }
897 }
898
899 return false;
900}
901
902/// SelectArithExtendedRegister - Select a "extended register" operand. This
903/// operand folds in an extend followed by an optional left shift.
904bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
905 SDValue &Shift) {
906 unsigned ShiftVal = 0;
 907 AArch64_AM::ShiftExtendType Ext;
 908
909 if (N.getOpcode() == ISD::SHL) {
910 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
911 if (!CSD)
912 return false;
913 ShiftVal = CSD->getZExtValue();
914 if (ShiftVal > 4)
915 return false;
916
917 Ext = getExtendTypeForNode(N.getOperand(0));
 918 if (Ext == AArch64_AM::InvalidShiftExtend)
 919 return false;
920
921 Reg = N.getOperand(0).getOperand(0);
922 } else {
 923 Ext = getExtendTypeForNode(N);
 924 if (Ext == AArch64_AM::InvalidShiftExtend)
 925 return false;
926
927 Reg = N.getOperand(0);
928
929 // Don't match if free 32-bit -> 64-bit zext can be used instead. Use the
930 // isDef32 as a heuristic for when the operand is likely to be a 32bit def.
931 auto isDef32 = [](SDValue N) {
932 unsigned Opc = N.getOpcode();
933 return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
934 Opc != ISD::CopyFromReg && Opc != ISD::AssertSext &&
935 Opc != ISD::AssertZext && Opc != ISD::AssertAlign &&
936 Opc != ISD::FREEZE;
937 };
938 if (Ext == AArch64_AM::UXTW && Reg->getValueType(0).getSizeInBits() == 32 &&
939 isDef32(Reg))
940 return false;
941 }
942
943 // AArch64 mandates that the RHS of the operation must use the smallest
944 // register class that could contain the size being extended from. Thus,
945 // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
946 // there might not be an actual 32-bit value in the program. We can
947 // (harmlessly) synthesize one by injected an EXTRACT_SUBREG here.
948 assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
949 Reg = narrowIfNeeded(CurDAG, Reg);
950 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
951 MVT::i32);
952 return isWorthFoldingALU(N);
953}
954
955/// SelectArithUXTXRegister - Select a "UXTX register" operand. This
956/// operand is referred to by instructions that have an SP operand.
957bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg,
958 SDValue &Shift) {
959 unsigned ShiftVal = 0;
 960 AArch64_AM::ShiftExtendType Ext;
 961
962 if (N.getOpcode() != ISD::SHL)
963 return false;
964
965 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
966 if (!CSD)
967 return false;
968 ShiftVal = CSD->getZExtValue();
969 if (ShiftVal > 4)
970 return false;
971
 972 Ext = AArch64_AM::UXTX;
 973 Reg = N.getOperand(0);
974 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
975 MVT::i32);
976 return isWorthFoldingALU(N);
977}
978
979/// If there's a use of this ADDlow that's not itself a load/store then we'll
980/// need to create a real ADD instruction from it anyway and there's no point in
981/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
982/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
983/// leads to duplicated ADRP instructions.
984static bool isWorthFoldingADDlow(SDValue N) {
985 for (auto *Use : N->uses()) {
986 if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
987 Use->getOpcode() != ISD::ATOMIC_LOAD &&
988 Use->getOpcode() != ISD::ATOMIC_STORE)
989 return false;
990
991 // ldar and stlr have much more restrictive addressing modes (just a
992 // register).
993 if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getSuccessOrdering()))
994 return false;
995 }
996
997 return true;
998}
999
1000/// SelectAddrModeIndexedBitWidth - Select a "register plus scaled (un)signed BW-bit
1001/// immediate" address. The "Size" argument is the size in bytes of the memory
1002/// reference, which determines the scale.
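/// For example, with IsSignedImm, BW = 9 and Size = 16, the offset must be a
/// multiple of 16 in the range [-4096, 4080], and OffImm holds offset/16.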
1003bool AArch64DAGToDAGISel::SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm,
1004 unsigned BW, unsigned Size,
1005 SDValue &Base,
1006 SDValue &OffImm) {
1007 SDLoc dl(N);
1008 const DataLayout &DL = CurDAG->getDataLayout();
1009 const TargetLowering *TLI = getTargetLowering();
1010 if (N.getOpcode() == ISD::FrameIndex) {
1011 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1012 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
1013 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
1014 return true;
1015 }
1016
1017 // As opposed to the (12-bit) Indexed addressing mode below, the 7/9-bit signed
1018 // selected here doesn't support labels/immediates, only base+offset.
1019 if (CurDAG->isBaseWithConstantOffset(N)) {
1020 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1021 if (IsSignedImm) {
1022 int64_t RHSC = RHS->getSExtValue();
1023 unsigned Scale = Log2_32(Size);
1024 int64_t Range = 0x1LL << (BW - 1);
1025
1026 if ((RHSC & (Size - 1)) == 0 && RHSC >= -(Range << Scale) &&
1027 RHSC < (Range << Scale)) {
1028 Base = N.getOperand(0);
1029 if (Base.getOpcode() == ISD::FrameIndex) {
1030 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1031 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
1032 }
1033 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
1034 return true;
1035 }
1036 } else {
1037 // unsigned Immediate
1038 uint64_t RHSC = RHS->getZExtValue();
1039 unsigned Scale = Log2_32(Size);
1040 uint64_t Range = 0x1ULL << BW;
1041
1042 if ((RHSC & (Size - 1)) == 0 && RHSC < (Range << Scale)) {
1043 Base = N.getOperand(0);
1044 if (Base.getOpcode() == ISD::FrameIndex) {
1045 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1046 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
1047 }
1048 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
1049 return true;
1050 }
1051 }
1052 }
1053 }
1054 // Base only. The address will be materialized into a register before
1055 // the memory is accessed.
1056 // add x0, Xbase, #offset
1057 // stp x1, x2, [x0]
1058 Base = N;
1059 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
1060 return true;
1061}
1062
1063/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
1064/// immediate" address. The "Size" argument is the size in bytes of the memory
1065/// reference, which determines the scale.
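/// For example, with Size = 8 the offset must be a multiple of 8 in the range
/// [0, 32760], and OffImm holds offset/8 (the scaled unsigned 12-bit field).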
1066bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
1067 SDValue &Base, SDValue &OffImm) {
1068 SDLoc dl(N);
1069 const DataLayout &DL = CurDAG->getDataLayout();
1070 const TargetLowering *TLI = getTargetLowering();
1071 if (N.getOpcode() == ISD::FrameIndex) {
1072 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1073 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
1074 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
1075 return true;
1076 }
1077
1078 if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
1079 GlobalAddressSDNode *GAN =
1080 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
1081 Base = N.getOperand(0);
1082 OffImm = N.getOperand(1);
1083 if (!GAN)
1084 return true;
1085
1086 if (GAN->getOffset() % Size == 0 &&
 1087 GAN->getGlobal()->getPointerAlignment(DL) >= Size)
 1088 return true;
1089 }
1090
1091 if (CurDAG->isBaseWithConstantOffset(N)) {
1092 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1093 int64_t RHSC = (int64_t)RHS->getZExtValue();
1094 unsigned Scale = Log2_32(Size);
1095 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
1096 Base = N.getOperand(0);
1097 if (Base.getOpcode() == ISD::FrameIndex) {
1098 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1099 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
1100 }
1101 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
1102 return true;
1103 }
1104 }
1105 }
1106
1107 // Before falling back to our general case, check if the unscaled
1108 // instructions can handle this. If so, that's preferable.
1109 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
1110 return false;
1111
1112 // Base only. The address will be materialized into a register before
1113 // the memory is accessed.
1114 // add x0, Xbase, #offset
1115 // ldr x0, [x0]
1116 Base = N;
1117 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
1118 return true;
1119}
1120
1121/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
1122/// immediate" address. This should only match when there is an offset that
1123/// is not valid for a scaled immediate addressing mode. The "Size" argument
1124/// is the size in bytes of the memory reference, which is needed here to know
1125/// what is valid for a scaled immediate.
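/// The unscaled (LDUR/STUR-style) forms accept any byte offset in [-256, 255],
/// so this only matches offsets in that range that the scaled form rejects.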
1126bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
1127 SDValue &Base,
1128 SDValue &OffImm) {
1129 if (!CurDAG->isBaseWithConstantOffset(N))
1130 return false;
1131 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1132 int64_t RHSC = RHS->getSExtValue();
1133 // If the offset is valid as a scaled immediate, don't match here.
1134 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
1135 RHSC < (0x1000 << Log2_32(Size)))
1136 return false;
1137 if (RHSC >= -256 && RHSC < 256) {
1138 Base = N.getOperand(0);
1139 if (Base.getOpcode() == ISD::FrameIndex) {
1140 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1141 const TargetLowering *TLI = getTargetLowering();
1142 Base = CurDAG->getTargetFrameIndex(
1143 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1144 }
1145 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
1146 return true;
1147 }
1148 }
1149 return false;
1150}
1151
1152static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
 1153 SDLoc dl(N);
1154 SDValue ImpDef = SDValue(
1155 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
1156 return CurDAG->getTargetInsertSubreg(AArch64::sub_32, dl, MVT::i64, ImpDef,
1157 N);
1158}
1159
1160/// Check if the given SHL node (\p N), can be used to form an
1161/// extended register for an addressing mode.
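/// For example, for a 4-byte access (Size = 4) a left shift by 2 matches the
/// free scaling of the [Xn, Wm, SXTW #2] / [Xn, Xm, LSL #2] addressing modes,
/// so only shift amounts of 0 or log2(Size) are accepted.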
1162bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
1163 bool WantExtend, SDValue &Offset,
1164 SDValue &SignExtend) {
1165 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
1166 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
1167 if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
1168 return false;
1169
1170 SDLoc dl(N);
1171 if (WantExtend) {
 1172 AArch64_AM::ShiftExtendType Ext =
 1173 getExtendTypeForNode(N.getOperand(0), true);
 1174 if (Ext == AArch64_AM::InvalidShiftExtend)
 1175 return false;
 1176
1176
1177 Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
1178 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
1179 MVT::i32);
1180 } else {
1181 Offset = N.getOperand(0);
1182 SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
1183 }
1184
1185 unsigned LegalShiftVal = Log2_32(Size);
1186 unsigned ShiftVal = CSD->getZExtValue();
1187
1188 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
1189 return false;
1190
1191 return isWorthFoldingAddr(N);
1192}
1193
1194bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
 1195 SDValue &Base, SDValue &Offset,
 1196 SDValue &SignExtend,
1197 SDValue &DoShift) {
1198 if (N.getOpcode() != ISD::ADD)
1199 return false;
1200 SDValue LHS = N.getOperand(0);
1201 SDValue RHS = N.getOperand(1);
1202 SDLoc dl(N);
1203
1204 // We don't want to match immediate adds here, because they are better lowered
1205 // to the register-immediate addressing modes.
1206 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
1207 return false;
1208
1209 // Check if this particular node is reused in any non-memory related
1210 // operation. If yes, do not try to fold this node into the address
1211 // computation, since the computation will be kept.
1212 const SDNode *Node = N.getNode();
1213 for (SDNode *UI : Node->uses()) {
1214 if (!isa<MemSDNode>(*UI))
1215 return false;
1216 }
1217
1218 // Remember if it is worth folding N when it produces extended register.
1219 bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(N);
1220
1221 // Try to match a shifted extend on the RHS.
1222 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
1223 SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
1224 Base = LHS;
1225 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
1226 return true;
1227 }
1228
1229 // Try to match a shifted extend on the LHS.
1230 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
1231 SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
1232 Base = RHS;
1233 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
1234 return true;
1235 }
1236
1237 // There was no shift, whatever else we find.
1238 DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);
1239
 1240 AArch64_AM::ShiftExtendType Ext;
 1241 // Try to match an unshifted extend on the LHS.
1242 if (IsExtendedRegisterWorthFolding &&
1243 (Ext = getExtendTypeForNode(LHS, true)) !=
 1244 AArch64_AM::InvalidShiftExtend) {
 1245 Base = RHS;
1246 Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
1247 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
1248 MVT::i32);
1249 if (isWorthFoldingAddr(LHS))
1250 return true;
1251 }
1252
1253 // Try to match an unshifted extend on the RHS.
1254 if (IsExtendedRegisterWorthFolding &&
1255 (Ext = getExtendTypeForNode(RHS, true)) !=
 1256 AArch64_AM::InvalidShiftExtend) {
 1257 Base = LHS;
1258 Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
1259 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
1260 MVT::i32);
1261 if (isWorthFoldingAddr(RHS))
1262 return true;
1263 }
1264
1265 return false;
1266}
1267
1268// Check if the given immediate is preferred by ADD. If an immediate can be
1269// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
1270// encoded by one MOVZ, return true.
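// For example, 0x345000 is preferred (it needs "ADD ..., LSL #12" and cannot
// be built by one MOVZ), while 0x5000 is not, because "MOVZ #0x5000" already
// materializes it in a single instruction.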
1271static bool isPreferredADD(int64_t ImmOff) {
1272 // Constant in [0x0, 0xfff] can be encoded in ADD.
1273 if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
1274 return true;
1275 // Check if it can be encoded in an "ADD LSL #12".
1276 if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
1277 // As a single MOVZ is faster than a "ADD of LSL #12", ignore such constant.
1278 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
1279 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
1280 return false;
1281}
1282
1283bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
 1284 SDValue &Base, SDValue &Offset,
 1285 SDValue &SignExtend,
1286 SDValue &DoShift) {
1287 if (N.getOpcode() != ISD::ADD)
1288 return false;
1289 SDValue LHS = N.getOperand(0);
1290 SDValue RHS = N.getOperand(1);
1291 SDLoc DL(N);
1292
1293 // Check if this particular node is reused in any non-memory related
1294 // operation. If yes, do not try to fold this node into the address
1295 // computation, since the computation will be kept.
1296 const SDNode *Node = N.getNode();
1297 for (SDNode *UI : Node->uses()) {
1298 if (!isa<MemSDNode>(*UI))
1299 return false;
1300 }
1301
1302 // Watch out if RHS is a wide immediate, it can not be selected into
1303 // [BaseReg+Imm] addressing mode. Also it may not be able to be encoded into
1304 // ADD/SUB. Instead it will use [BaseReg + 0] address mode and generate
1305 // instructions like:
1306 // MOV X0, WideImmediate
1307 // ADD X1, BaseReg, X0
1308 // LDR X2, [X1, 0]
1309 // For such situation, using [BaseReg, XReg] addressing mode can save one
1310 // ADD/SUB:
1311 // MOV X0, WideImmediate
1312 // LDR X2, [BaseReg, X0]
1313 if (isa<ConstantSDNode>(RHS)) {
1314 int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
1315 unsigned Scale = Log2_32(Size);
 1316 // Skip immediates that can be selected in the load/store addressing mode.
 1317 // Also skip immediates that can be encoded by a single ADD (SUB is also
1318 // checked by using -ImmOff).
1319 if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
1320 isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
1321 return false;
1322
1323 SDValue Ops[] = { RHS };
1324 SDNode *MOVI =
1325 CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
1326 SDValue MOVIV = SDValue(MOVI, 0);
1327 // This ADD of two X register will be selected into [Reg+Reg] mode.
1328 N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
1329 }
1330
1331 // Remember if it is worth folding N when it produces extended register.
1332 bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(N);
1333
1334 // Try to match a shifted extend on the RHS.
1335 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
1336 SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
1337 Base = LHS;
1338 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
1339 return true;
1340 }
1341
1342 // Try to match a shifted extend on the LHS.
1343 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
1344 SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
1345 Base = RHS;
1346 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
1347 return true;
1348 }
1349
1350 // Match any non-shifted, non-extend, non-immediate add expression.
1351 Base = LHS;
1352 Offset = RHS;
1353 SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
1354 DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
1355 // Reg1 + Reg2 is free: no check needed.
1356 return true;
1357}
1358
1359SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
1360 static const unsigned RegClassIDs[] = {
1361 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
1362 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
1363 AArch64::dsub2, AArch64::dsub3};
1364
1365 return createTuple(Regs, RegClassIDs, SubRegs);
1366}
1367
1368SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
1369 static const unsigned RegClassIDs[] = {
1370 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
1371 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
1372 AArch64::qsub2, AArch64::qsub3};
1373
1374 return createTuple(Regs, RegClassIDs, SubRegs);
1375}
1376
1377SDValue AArch64DAGToDAGISel::createZTuple(ArrayRef<SDValue> Regs) {
1378 static const unsigned RegClassIDs[] = {AArch64::ZPR2RegClassID,
1379 AArch64::ZPR3RegClassID,
1380 AArch64::ZPR4RegClassID};
1381 static const unsigned SubRegs[] = {AArch64::zsub0, AArch64::zsub1,
1382 AArch64::zsub2, AArch64::zsub3};
1383
1384 return createTuple(Regs, RegClassIDs, SubRegs);
1385}
1386
1387SDValue AArch64DAGToDAGISel::createZMulTuple(ArrayRef<SDValue> Regs) {
1388 assert(Regs.size() == 2 || Regs.size() == 4);
1389
1390 // The createTuple interface requires 3 RegClassIDs for each possible
1391 // tuple type even though we only have them for ZPR2 and ZPR4.
1392 static const unsigned RegClassIDs[] = {AArch64::ZPR2Mul2RegClassID, 0,
1393 AArch64::ZPR4Mul4RegClassID};
1394 static const unsigned SubRegs[] = {AArch64::zsub0, AArch64::zsub1,
1395 AArch64::zsub2, AArch64::zsub3};
1396 return createTuple(Regs, RegClassIDs, SubRegs);
1397}
1398
1399SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
1400 const unsigned RegClassIDs[],
1401 const unsigned SubRegs[]) {
1402 // There's no special register-class for a vector-list of 1 element: it's just
1403 // a vector.
1404 if (Regs.size() == 1)
1405 return Regs[0];
1406
1407 assert(Regs.size() >= 2 && Regs.size() <= 4);
1408
1409 SDLoc DL(Regs[0]);
1410
1412
1413 // First operand of REG_SEQUENCE is the desired RegClass.
1414 Ops.push_back(
1415 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));
1416
1417 // Then we get pairs of source & subregister-position for the components.
1418 for (unsigned i = 0; i < Regs.size(); ++i) {
1419 Ops.push_back(Regs[i]);
1420 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
1421 }
1422
1423 SDNode *N =
1424 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
1425 return SDValue(N, 0);
1426}
1427
1428void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
1429 bool isExt) {
1430 SDLoc dl(N);
1431 EVT VT = N->getValueType(0);
1432
1433 unsigned ExtOff = isExt;
1434
1435 // Form a REG_SEQUENCE to force register allocation.
1436 unsigned Vec0Off = ExtOff + 1;
1437 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
1438 N->op_begin() + Vec0Off + NumVecs);
1439 SDValue RegSeq = createQTuple(Regs);
1440
 1441 SmallVector<SDValue, 6> Ops;
 1442 if (isExt)
1443 Ops.push_back(N->getOperand(1));
1444 Ops.push_back(RegSeq);
1445 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
1446 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
1447}
1448
1449bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
1450 LoadSDNode *LD = cast<LoadSDNode>(N);
1451 if (LD->isUnindexed())
1452 return false;
1453 EVT VT = LD->getMemoryVT();
1454 EVT DstVT = N->getValueType(0);
1455 ISD::MemIndexedMode AM = LD->getAddressingMode();
1456 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
1457
1458 // We're not doing validity checking here. That was done when checking
1459 // if we should mark the load as indexed or not. We're just selecting
1460 // the right instruction.
1461 unsigned Opcode = 0;
1462
1463 ISD::LoadExtType ExtType = LD->getExtensionType();
1464 bool InsertTo64 = false;
1465 if (VT == MVT::i64)
1466 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
1467 else if (VT == MVT::i32) {
1468 if (ExtType == ISD::NON_EXTLOAD)
1469 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1470 else if (ExtType == ISD::SEXTLOAD)
1471 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
1472 else {
1473 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1474 InsertTo64 = true;
1475 // The result of the load is only i32. It's the subreg_to_reg that makes
1476 // it into an i64.
1477 DstVT = MVT::i32;
1478 }
1479 } else if (VT == MVT::i16) {
1480 if (ExtType == ISD::SEXTLOAD) {
1481 if (DstVT == MVT::i64)
1482 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
1483 else
1484 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
1485 } else {
1486 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
1487 InsertTo64 = DstVT == MVT::i64;
1488 // The result of the load is only i32. It's the subreg_to_reg that makes
1489 // it into an i64.
1490 DstVT = MVT::i32;
1491 }
1492 } else if (VT == MVT::i8) {
1493 if (ExtType == ISD::SEXTLOAD) {
1494 if (DstVT == MVT::i64)
1495 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1496 else
1497 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1498 } else {
1499 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1500 InsertTo64 = DstVT == MVT::i64;
1501 // The result of the load is only i32. It's the subreg_to_reg that makes
1502 // it into an i64.
1503 DstVT = MVT::i32;
1504 }
1505 } else if (VT == MVT::f16) {
1506 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1507 } else if (VT == MVT::bf16) {
1508 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1509 } else if (VT == MVT::f32) {
1510 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1511 } else if (VT == MVT::f64 || VT.is64BitVector()) {
1512 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1513 } else if (VT.is128BitVector()) {
1514 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1515 } else
1516 return false;
1517 SDValue Chain = LD->getChain();
1518 SDValue Base = LD->getBasePtr();
1519 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
1520 int OffsetVal = (int)OffsetOp->getZExtValue();
1521 SDLoc dl(N);
1522 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
1523 SDValue Ops[] = { Base, Offset, Chain };
1524 SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
1525 MVT::Other, Ops);
1526
1527 // Transfer memoperands.
1528 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
1529 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Res), {MemOp});
1530
1531 // Either way, we're replacing the node, so tell the caller that.
1532 SDValue LoadedVal = SDValue(Res, 1);
1533 if (InsertTo64) {
1534 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1535 LoadedVal =
1536 SDValue(CurDAG->getMachineNode(
1537 AArch64::SUBREG_TO_REG, dl, MVT::i64,
1538 CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
1539 SubReg),
1540 0);
1541 }
1542
1543 ReplaceUses(SDValue(N, 0), LoadedVal);
1544 ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
1545 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
1546 CurDAG->RemoveDeadNode(N);
1547 return true;
1548}
1549
1550void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
1551 unsigned SubRegIdx) {
1552 SDLoc dl(N);
1553 EVT VT = N->getValueType(0);
1554 SDValue Chain = N->getOperand(0);
1555
1556 SDValue Ops[] = {N->getOperand(2), // Mem operand;
1557 Chain};
1558
1559 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1560
1561 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1562 SDValue SuperReg = SDValue(Ld, 0);
1563 for (unsigned i = 0; i < NumVecs; ++i)
1564 ReplaceUses(SDValue(N, i),
1565 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1566
1567 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1568
1569 // Transfer memoperands. In the case of AArch64::LD64B, there won't be one,
1570 // because it's too simple to have needed special treatment during lowering.
1571 if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(N)) {
1572 MachineMemOperand *MemOp = MemIntr->getMemOperand();
1573 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
1574 }
1575
1576 CurDAG->RemoveDeadNode(N);
1577}
1578
1579void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1580 unsigned Opc, unsigned SubRegIdx) {
1581 SDLoc dl(N);
1582 EVT VT = N->getValueType(0);
1583 SDValue Chain = N->getOperand(0);
1584
1585 SDValue Ops[] = {N->getOperand(1), // Mem operand
1586 N->getOperand(2), // Incremental
1587 Chain};
1588
1589 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1590 MVT::Untyped, MVT::Other};
1591
1592 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1593
1594 // Update uses of write back register
1595 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1596
1597 // Update uses of vector list
1598 SDValue SuperReg = SDValue(Ld, 1);
1599 if (NumVecs == 1)
1600 ReplaceUses(SDValue(N, 0), SuperReg);
1601 else
1602 for (unsigned i = 0; i < NumVecs; ++i)
1603 ReplaceUses(SDValue(N, i),
1604 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1605
1606 // Update the chain
1607 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1608 CurDAG->RemoveDeadNode(N);
1609}
1610
1611/// Optimize \param OldBase and \param OldOffset selecting the best addressing
1612/// mode. Returns a tuple consisting of an Opcode, an SDValue representing the
1613/// new Base and an SDValue representing the new offset.
1614std::tuple<unsigned, SDValue, SDValue>
1615AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr,
1616 unsigned Opc_ri,
1617 const SDValue &OldBase,
1618 const SDValue &OldOffset,
1619 unsigned Scale) {
1620 SDValue NewBase = OldBase;
1621 SDValue NewOffset = OldOffset;
1622 // Detect a possible Reg+Imm addressing mode.
1623 const bool IsRegImm = SelectAddrModeIndexedSVE</*Min=*/-8, /*Max=*/7>(
1624 N, OldBase, NewBase, NewOffset);
1625
1626 // Detect a possible reg+reg addressing mode, but only if we haven't already
1627 // detected a Reg+Imm one.
1628 const bool IsRegReg =
1629 !IsRegImm && SelectSVERegRegAddrMode(OldBase, Scale, NewBase, NewOffset);
1630
1631 // Select the instruction.
1632 return std::make_tuple(IsRegReg ? Opc_rr : Opc_ri, NewBase, NewOffset);
1633}
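// Illustrative note: the callers in this file pass a zero offset, so roughly,
// a base that SelectAddrModeIndexedSVE<-8, 7> recognizes (a reg+imm form)
// picks Opc_ri with the folded base/offset; failing that, a base recognized
// by SelectSVERegRegAddrMode (scaled by Scale) picks Opc_rr; if neither
// matches, the original base with the zero offset is used with Opc_ri.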
1634
1635enum class SelectTypeKind {
1636 Int1 = 0,
1637 Int = 1,
1638 FP = 2,
1639 AnyType = 3,
1640};
1641
1642/// This function selects an opcode from a list of opcodes, which is
 1643/// expected to contain the opcodes for { 8-bit, 16-bit, 32-bit, 64-bit }
1644/// element types, in this order.
1645template <SelectTypeKind Kind>
1646static unsigned SelectOpcodeFromVT(EVT VT, ArrayRef<unsigned> Opcodes) {
1647 // Only match scalable vector VTs
1648 if (!VT.isScalableVector())
1649 return 0;
1650
1651 EVT EltVT = VT.getVectorElementType();
1652 switch (Kind) {
 1653 case SelectTypeKind::AnyType:
 1654 break;
 1655 case SelectTypeKind::Int:
 1656 if (EltVT != MVT::i8 && EltVT != MVT::i16 && EltVT != MVT::i32 &&
1657 EltVT != MVT::i64)
1658 return 0;
1659 break;
 1660 case SelectTypeKind::Int1:
 1661 if (EltVT != MVT::i1)
1662 return 0;
1663 break;
1664 case SelectTypeKind::FP:
1665 if (EltVT != MVT::f16 && EltVT != MVT::f32 && EltVT != MVT::f64)
1666 return 0;
1667 break;
1668 }
1669
1670 unsigned Offset;
1671 switch (VT.getVectorMinNumElements()) {
1672 case 16: // 8-bit
1673 Offset = 0;
1674 break;
1675 case 8: // 16-bit
1676 Offset = 1;
1677 break;
1678 case 4: // 32-bit
1679 Offset = 2;
1680 break;
1681 case 2: // 64-bit
1682 Offset = 3;
1683 break;
1684 default:
1685 return 0;
1686 }
1687
1688 return (Opcodes.size() <= Offset) ? 0 : Opcodes[Offset];
1689}
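// Worked example: for VT == nxv8i16 with Kind == SelectTypeKind::Int, the
// element-type check passes and the minimum element count of 8 gives
// Offset == 1, so Opcodes[1] (the 16-bit variant) is returned; a fixed-width
// type such as v8i16 fails the scalable-vector check and returns 0.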
1690
1691// This function is almost identical to SelectWhilePair, but has an
1692// extra check on the range of the immediate operand.
1693// TODO: Merge these two functions together at some point?
1694void AArch64DAGToDAGISel::SelectPExtPair(SDNode *N, unsigned Opc) {
1695 // Immediate can be either 0 or 1.
1696 if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(N->getOperand(2)))
1697 if (Imm->getZExtValue() > 1)
1698 return;
1699
1700 SDLoc DL(N);
1701 EVT VT = N->getValueType(0);
1702 SDValue Ops[] = {N->getOperand(1), N->getOperand(2)};
1703 SDNode *WhilePair = CurDAG->getMachineNode(Opc, DL, MVT::Untyped, Ops);
1704 SDValue SuperReg = SDValue(WhilePair, 0);
1705
1706 for (unsigned I = 0; I < 2; ++I)
1707 ReplaceUses(SDValue(N, I), CurDAG->getTargetExtractSubreg(
1708 AArch64::psub0 + I, DL, VT, SuperReg));
1709
1710 CurDAG->RemoveDeadNode(N);
1711}
1712
1713void AArch64DAGToDAGISel::SelectWhilePair(SDNode *N, unsigned Opc) {
1714 SDLoc DL(N);
1715 EVT VT = N->getValueType(0);
1716
1717 SDValue Ops[] = {N->getOperand(1), N->getOperand(2)};
1718
1719 SDNode *WhilePair = CurDAG->getMachineNode(Opc, DL, MVT::Untyped, Ops);
1720 SDValue SuperReg = SDValue(WhilePair, 0);
1721
1722 for (unsigned I = 0; I < 2; ++I)
1723 ReplaceUses(SDValue(N, I), CurDAG->getTargetExtractSubreg(
1724 AArch64::psub0 + I, DL, VT, SuperReg));
1725
1726 CurDAG->RemoveDeadNode(N);
1727}
1728
1729void AArch64DAGToDAGISel::SelectCVTIntrinsic(SDNode *N, unsigned NumVecs,
1730 unsigned Opcode) {
1731 EVT VT = N->getValueType(0);
1732 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1733 SDValue Ops = createZTuple(Regs);
1734 SDLoc DL(N);
1735 SDNode *Intrinsic = CurDAG->getMachineNode(Opcode, DL, MVT::Untyped, Ops);
1736 SDValue SuperReg = SDValue(Intrinsic, 0);
1737 for (unsigned i = 0; i < NumVecs; ++i)
1738 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
1739 AArch64::zsub0 + i, DL, VT, SuperReg));
1740
1741 CurDAG->RemoveDeadNode(N);
1742}
1743
1744void AArch64DAGToDAGISel::SelectDestructiveMultiIntrinsic(SDNode *N,
1745 unsigned NumVecs,
1746 bool IsZmMulti,
1747 unsigned Opcode,
1748 bool HasPred) {
1749 assert(Opcode != 0 && "Unexpected opcode");
1750
1751 SDLoc DL(N);
1752 EVT VT = N->getValueType(0);
1753 unsigned FirstVecIdx = HasPred ? 2 : 1;
1754
1755 auto GetMultiVecOperand = [=](unsigned StartIdx) {
1756 SmallVector<SDValue, 4> Regs(N->op_begin() + StartIdx,
1757 N->op_begin() + StartIdx + NumVecs);
1758 return createZMulTuple(Regs);
1759 };
1760
1761 SDValue Zdn = GetMultiVecOperand(FirstVecIdx);
1762
1763 SDValue Zm;
1764 if (IsZmMulti)
1765 Zm = GetMultiVecOperand(NumVecs + FirstVecIdx);
1766 else
1767 Zm = N->getOperand(NumVecs + FirstVecIdx);
1768
 1769 SDNode *Intrinsic;
 1770 if (HasPred)
1771 Intrinsic = CurDAG->getMachineNode(Opcode, DL, MVT::Untyped,
1772 N->getOperand(1), Zdn, Zm);
1773 else
1774 Intrinsic = CurDAG->getMachineNode(Opcode, DL, MVT::Untyped, Zdn, Zm);
1775 SDValue SuperReg = SDValue(Intrinsic, 0);
1776 for (unsigned i = 0; i < NumVecs; ++i)
1777 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
1778 AArch64::zsub0 + i, DL, VT, SuperReg));
1779
1780 CurDAG->RemoveDeadNode(N);
1781}
1782
1783void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
1784 unsigned Scale, unsigned Opc_ri,
1785 unsigned Opc_rr, bool IsIntr) {
1786 assert(Scale < 4 && "Invalid scaling value.");
1787 SDLoc DL(N);
1788 EVT VT = N->getValueType(0);
1789 SDValue Chain = N->getOperand(0);
1790
1791 // Optimize addressing mode.
 1792 SDValue Base, Offset;
 1793 unsigned Opc;
1794 std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
1795 N, Opc_rr, Opc_ri, N->getOperand(IsIntr ? 3 : 2),
1796 CurDAG->getTargetConstant(0, DL, MVT::i64), Scale);
1797
1798 SDValue Ops[] = {N->getOperand(IsIntr ? 2 : 1), // Predicate
1799 Base, // Memory operand
1800 Offset, Chain};
1801
1802 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1803
1804 SDNode *Load = CurDAG->getMachineNode(Opc, DL, ResTys, Ops);
1805 SDValue SuperReg = SDValue(Load, 0);
1806 for (unsigned i = 0; i < NumVecs; ++i)
1807 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
1808 AArch64::zsub0 + i, DL, VT, SuperReg));
1809
1810 // Copy chain
1811 unsigned ChainIdx = NumVecs;
1812 ReplaceUses(SDValue(N, ChainIdx), SDValue(Load, 1));
1813 CurDAG->RemoveDeadNode(N);
1814}
1815
1816void AArch64DAGToDAGISel::SelectContiguousMultiVectorLoad(SDNode *N,
1817 unsigned NumVecs,
1818 unsigned Scale,
1819 unsigned Opc_ri,
1820 unsigned Opc_rr) {
1821 assert(Scale < 4 && "Invalid scaling value.");
1822 SDLoc DL(N);
1823 EVT VT = N->getValueType(0);
1824 SDValue Chain = N->getOperand(0);
1825
1826 SDValue PNg = N->getOperand(2);
1827 SDValue Base = N->getOperand(3);
1828 SDValue Offset = CurDAG->getTargetConstant(0, DL, MVT::i64);
1829 unsigned Opc;
1830 std::tie(Opc, Base, Offset) =
1831 findAddrModeSVELoadStore(N, Opc_rr, Opc_ri, Base, Offset, Scale);
1832
1833 SDValue Ops[] = {PNg, // Predicate-as-counter
1834 Base, // Memory operand
1835 Offset, Chain};
1836
1837 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1838
1839 SDNode *Load = CurDAG->getMachineNode(Opc, DL, ResTys, Ops);
1840 SDValue SuperReg = SDValue(Load, 0);
1841 for (unsigned i = 0; i < NumVecs; ++i)
1842 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
1843 AArch64::zsub0 + i, DL, VT, SuperReg));
1844
1845 // Copy chain
1846 unsigned ChainIdx = NumVecs;
1847 ReplaceUses(SDValue(N, ChainIdx), SDValue(Load, 1));
1848 CurDAG->RemoveDeadNode(N);
1849}
1850
1851void AArch64DAGToDAGISel::SelectFrintFromVT(SDNode *N, unsigned NumVecs,
1852 unsigned Opcode) {
1853 if (N->getValueType(0) != MVT::nxv4f32)
1854 return;
1855 SelectUnaryMultiIntrinsic(N, NumVecs, true, Opcode);
1856}
1857
1858void AArch64DAGToDAGISel::SelectClamp(SDNode *N, unsigned NumVecs,
1859 unsigned Op) {
1860 SDLoc DL(N);
1861 EVT VT = N->getValueType(0);
1862
1863 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1864 SDValue Zd = createZMulTuple(Regs);
1865 SDValue Zn = N->getOperand(1 + NumVecs);
1866 SDValue Zm = N->getOperand(2 + NumVecs);
1867
1868 SDValue Ops[] = {Zd, Zn, Zm};
1869
1870 SDNode *Intrinsic = CurDAG->getMachineNode(Op, DL, MVT::Untyped, Ops);
1871 SDValue SuperReg = SDValue(Intrinsic, 0);
1872 for (unsigned i = 0; i < NumVecs; ++i)
1873 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
1874 AArch64::zsub0 + i, DL, VT, SuperReg));
1875
1876 CurDAG->RemoveDeadNode(N);
1877}
1878
1879bool SelectSMETile(unsigned &BaseReg, unsigned TileNum) {
1880 switch (BaseReg) {
1881 default:
1882 return false;
1883 case AArch64::ZA:
1884 case AArch64::ZAB0:
1885 if (TileNum == 0)
1886 break;
1887 return false;
1888 case AArch64::ZAH0:
1889 if (TileNum <= 1)
1890 break;
1891 return false;
1892 case AArch64::ZAS0:
1893 if (TileNum <= 3)
1894 break;
1895 return false;
1896 case AArch64::ZAD0:
1897 if (TileNum <= 7)
1898 break;
1899 return false;
1900 }
1901
1902 BaseReg += TileNum;
1903 return true;
1904}
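// Worked example: with BaseReg == AArch64::ZAS0 and TileNum == 2 the range
// check for the 32-bit tiles passes and BaseReg becomes ZAS2 (the code relies
// on the per-size tile registers being numbered consecutively); TileNum == 4
// would be rejected.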
1905
1906template <unsigned MaxIdx, unsigned Scale>
1907void AArch64DAGToDAGISel::SelectMultiVectorMove(SDNode *N, unsigned NumVecs,
1908 unsigned BaseReg, unsigned Op) {
1909 unsigned TileNum = 0;
1910 if (BaseReg != AArch64::ZA)
1911 TileNum = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
1912
1913 if (!SelectSMETile(BaseReg, TileNum))
1914 return;
1915
1916 SDValue SliceBase, Base, Offset;
1917 if (BaseReg == AArch64::ZA)
1918 SliceBase = N->getOperand(2);
1919 else
1920 SliceBase = N->getOperand(3);
1921
1922 if (!SelectSMETileSlice(SliceBase, MaxIdx, Base, Offset, Scale))
1923 return;
1924
1925 SDLoc DL(N);
1926 SDValue SubReg = CurDAG->getRegister(BaseReg, MVT::Other);
1927 SDValue Ops[] = {SubReg, Base, Offset, /*Chain*/ N->getOperand(0)};
1928 SDNode *Mov = CurDAG->getMachineNode(Op, DL, {MVT::Untyped, MVT::Other}, Ops);
1929
1930 EVT VT = N->getValueType(0);
1931 for (unsigned I = 0; I < NumVecs; ++I)
1932 ReplaceUses(SDValue(N, I),
1933 CurDAG->getTargetExtractSubreg(AArch64::zsub0 + I, DL, VT,
1934 SDValue(Mov, 0)));
1935 // Copy chain
1936 unsigned ChainIdx = NumVecs;
1937 ReplaceUses(SDValue(N, ChainIdx), SDValue(Mov, 1));
1938 CurDAG->RemoveDeadNode(N);
1939}
1940
1941void AArch64DAGToDAGISel::SelectUnaryMultiIntrinsic(SDNode *N,
1942 unsigned NumOutVecs,
1943 bool IsTupleInput,
1944 unsigned Opc) {
1945 SDLoc DL(N);
1946 EVT VT = N->getValueType(0);
1947 unsigned NumInVecs = N->getNumOperands() - 1;
1948
1950 if (IsTupleInput) {
1951 assert((NumInVecs == 2 || NumInVecs == 4) &&
1952 "Don't know how to handle multi-register input!");
1953 SmallVector<SDValue, 4> Regs(N->op_begin() + 1,
1954 N->op_begin() + 1 + NumInVecs);
1955 Ops.push_back(createZMulTuple(Regs));
1956 } else {
1957 // All intrinsic nodes have the ID as the first operand, hence the "1 + I".
1958 for (unsigned I = 0; I < NumInVecs; I++)
1959 Ops.push_back(N->getOperand(1 + I));
1960 }
1961
1962 SDNode *Res = CurDAG->getMachineNode(Opc, DL, MVT::Untyped, Ops);
1963 SDValue SuperReg = SDValue(Res, 0);
1964
1965 for (unsigned I = 0; I < NumOutVecs; I++)
1966 ReplaceUses(SDValue(N, I), CurDAG->getTargetExtractSubreg(
1967 AArch64::zsub0 + I, DL, VT, SuperReg));
1968 CurDAG->RemoveDeadNode(N);
1969}
1970
1971void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
1972 unsigned Opc) {
1973 SDLoc dl(N);
1974 EVT VT = N->getOperand(2)->getValueType(0);
1975
1976 // Form a REG_SEQUENCE to force register allocation.
1977 bool Is128Bit = VT.getSizeInBits() == 128;
1978 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1979 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1980
1981 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
1982 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1983
1984 // Transfer memoperands.
1985 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1986 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
1987
1988 ReplaceNode(N, St);
1989}
1990
1991void AArch64DAGToDAGISel::SelectPredicatedStore(SDNode *N, unsigned NumVecs,
1992 unsigned Scale, unsigned Opc_rr,
1993 unsigned Opc_ri) {
1994 SDLoc dl(N);
1995
1996 // Form a REG_SEQUENCE to force register allocation.
1997 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1998 SDValue RegSeq = createZTuple(Regs);
1999
2000 // Optimize addressing mode.
2001 unsigned Opc;
 2002 SDValue Base, Offset;
 2003 std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
2004 N, Opc_rr, Opc_ri, N->getOperand(NumVecs + 3),
2005 CurDAG->getTargetConstant(0, dl, MVT::i64), Scale);
2006
2007 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), // predicate
2008 Base, // address
2009 Offset, // offset
2010 N->getOperand(0)}; // chain
2011 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
2012
2013 ReplaceNode(N, St);
2014}
2015
2016bool AArch64DAGToDAGISel::SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base,
2017 SDValue &OffImm) {
2018 SDLoc dl(N);
2019 const DataLayout &DL = CurDAG->getDataLayout();
2020 const TargetLowering *TLI = getTargetLowering();
2021
2022 // Try to match it for the frame address
2023 if (auto FINode = dyn_cast<FrameIndexSDNode>(N)) {
2024 int FI = FINode->getIndex();
2025 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
2026 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
2027 return true;
2028 }
2029
2030 return false;
2031}
2032
2033void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
2034 unsigned Opc) {
2035 SDLoc dl(N);
2036 EVT VT = N->getOperand(2)->getValueType(0);
2037 const EVT ResTys[] = {MVT::i64, // Type of the write back register
2038 MVT::Other}; // Type for the Chain
2039
2040 // Form a REG_SEQUENCE to force register allocation.
2041 bool Is128Bit = VT.getSizeInBits() == 128;
2042 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
2043 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
2044
2045 SDValue Ops[] = {RegSeq,
2046 N->getOperand(NumVecs + 1), // base register
2047 N->getOperand(NumVecs + 2), // Incremental
2048 N->getOperand(0)}; // Chain
2049 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2050
2051 ReplaceNode(N, St);
2052}
2053
2054namespace {
2055/// WidenVector - Given a value in the V64 register class, produce the
2056/// equivalent value in the V128 register class.
2057class WidenVector {
2058 SelectionDAG &DAG;
2059
2060public:
2061 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
2062
2063 SDValue operator()(SDValue V64Reg) {
2064 EVT VT = V64Reg.getValueType();
2065 unsigned NarrowSize = VT.getVectorNumElements();
2066 MVT EltTy = VT.getVectorElementType().getSimpleVT();
2067 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
2068 SDLoc DL(V64Reg);
2069
2070 SDValue Undef =
2071 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
2072 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
2073 }
2074};
2075} // namespace
2076
2077/// NarrowVector - Given a value in the V128 register class, produce the
2078/// equivalent value in the V64 register class.
 2079static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
 2080 EVT VT = V128Reg.getValueType();
2081 unsigned WideSize = VT.getVectorNumElements();
2082 MVT EltTy = VT.getVectorElementType().getSimpleVT();
2083 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
2084
2085 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
2086 V128Reg);
2087}
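// Worked example: a v4i32 value (128 bits) is narrowed to v2i32 by extracting
// its dsub subregister, i.e. the low 64-bit half of the Q register.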
2088
2089void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
2090 unsigned Opc) {
2091 SDLoc dl(N);
2092 EVT VT = N->getValueType(0);
2093 bool Narrow = VT.getSizeInBits() == 64;
2094
2095 // Form a REG_SEQUENCE to force register allocation.
2096 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
2097
2098 if (Narrow)
2099 transform(Regs, Regs.begin(),
2100 WidenVector(*CurDAG));
2101
2102 SDValue RegSeq = createQTuple(Regs);
2103
2104 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
2105
2106 unsigned LaneNo =
2107 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
2108
2109 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
2110 N->getOperand(NumVecs + 3), N->getOperand(0)};
2111 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2112 SDValue SuperReg = SDValue(Ld, 0);
2113
2114 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
2115 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
2116 AArch64::qsub2, AArch64::qsub3 };
2117 for (unsigned i = 0; i < NumVecs; ++i) {
2118 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
2119 if (Narrow)
2120 NV = NarrowVector(NV, *CurDAG);
2121 ReplaceUses(SDValue(N, i), NV);
2122 }
2123
2124 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
2125 CurDAG->RemoveDeadNode(N);
2126}
2127
2128void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
2129 unsigned Opc) {
2130 SDLoc dl(N);
2131 EVT VT = N->getValueType(0);
2132 bool Narrow = VT.getSizeInBits() == 64;
2133
2134 // Form a REG_SEQUENCE to force register allocation.
2135 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
2136
2137 if (Narrow)
2138 transform(Regs, Regs.begin(),
2139 WidenVector(*CurDAG));
2140
2141 SDValue RegSeq = createQTuple(Regs);
2142
2143 const EVT ResTys[] = {MVT::i64, // Type of the write back register
2144 RegSeq->getValueType(0), MVT::Other};
2145
2146 unsigned LaneNo =
2147 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
2148
2149 SDValue Ops[] = {RegSeq,
2150 CurDAG->getTargetConstant(LaneNo, dl,
2151 MVT::i64), // Lane Number
2152 N->getOperand(NumVecs + 2), // Base register
2153 N->getOperand(NumVecs + 3), // Incremental
2154 N->getOperand(0)};
2155 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2156
2157 // Update uses of the write back register
2158 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
2159
2160 // Update uses of the vector list
2161 SDValue SuperReg = SDValue(Ld, 1);
2162 if (NumVecs == 1) {
2163 ReplaceUses(SDValue(N, 0),
2164 Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
2165 } else {
2166 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
2167 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
2168 AArch64::qsub2, AArch64::qsub3 };
2169 for (unsigned i = 0; i < NumVecs; ++i) {
2170 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
2171 SuperReg);
2172 if (Narrow)
2173 NV = NarrowVector(NV, *CurDAG);
2174 ReplaceUses(SDValue(N, i), NV);
2175 }
2176 }
2177
2178 // Update the Chain
2179 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
2180 CurDAG->RemoveDeadNode(N);
2181}
2182
2183void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
2184 unsigned Opc) {
2185 SDLoc dl(N);
2186 EVT VT = N->getOperand(2)->getValueType(0);
2187 bool Narrow = VT.getSizeInBits() == 64;
2188
2189 // Form a REG_SEQUENCE to force register allocation.
2190 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
2191
2192 if (Narrow)
2193 transform(Regs, Regs.begin(),
2194 WidenVector(*CurDAG));
2195
2196 SDValue RegSeq = createQTuple(Regs);
2197
2198 unsigned LaneNo =
2199 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
2200
2201 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
2202 N->getOperand(NumVecs + 3), N->getOperand(0)};
2203 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
2204
2205 // Transfer memoperands.
2206 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2207 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
2208
2209 ReplaceNode(N, St);
2210}
2211
2212void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
2213 unsigned Opc) {
2214 SDLoc dl(N);
2215 EVT VT = N->getOperand(2)->getValueType(0);
2216 bool Narrow = VT.getSizeInBits() == 64;
2217
2218 // Form a REG_SEQUENCE to force register allocation.
2219 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
2220
2221 if (Narrow)
2222 transform(Regs, Regs.begin(),
2223 WidenVector(*CurDAG));
2224
2225 SDValue RegSeq = createQTuple(Regs);
2226
2227 const EVT ResTys[] = {MVT::i64, // Type of the write back register
2228 MVT::Other};
2229
2230 unsigned LaneNo =
2231 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
2232
2233 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
2234 N->getOperand(NumVecs + 2), // Base Register
2235 N->getOperand(NumVecs + 3), // Incremental
2236 N->getOperand(0)};
2237 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2238
2239 // Transfer memoperands.
2240 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2241 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
2242
2243 ReplaceNode(N, St);
2244}
2245
 2246static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
 2247 unsigned &Opc, SDValue &Opd0,
2248 unsigned &LSB, unsigned &MSB,
2249 unsigned NumberOfIgnoredLowBits,
2250 bool BiggerPattern) {
2251 assert(N->getOpcode() == ISD::AND &&
2252 "N must be a AND operation to call this function");
2253
2254 EVT VT = N->getValueType(0);
2255
2256 // Here we can test the type of VT and return false when the type does not
2257 // match, but since it is done prior to that call in the current context
2258 // we turned that into an assert to avoid redundant code.
2259 assert((VT == MVT::i32 || VT == MVT::i64) &&
2260 "Type checking must have been done before calling this function");
2261
2262 // FIXME: simplify-demanded-bits in DAGCombine will probably have
2263 // changed the AND node to a 32-bit mask operation. We'll have to
2264 // undo that as part of the transform here if we want to catch all
2265 // the opportunities.
2266 // Currently the NumberOfIgnoredLowBits argument helps to recover
2267 // from these situations when matching bigger pattern (bitfield insert).
2268
2269 // For unsigned extracts, check for a shift right and mask
2270 uint64_t AndImm = 0;
2271 if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
2272 return false;
2273
2274 const SDNode *Op0 = N->getOperand(0).getNode();
2275
2276 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
2277 // simplified. Try to undo that
2278 AndImm |= maskTrailingOnes<uint64_t>(NumberOfIgnoredLowBits);
2279
2280 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
2281 if (AndImm & (AndImm + 1))
2282 return false;
2283
2284 bool ClampMSB = false;
2285 uint64_t SrlImm = 0;
2286 // Handle the SRL + ANY_EXTEND case.
2287 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
2288 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, SrlImm)) {
2289 // Extend the incoming operand of the SRL to 64-bit.
2290 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
2291 // Make sure to clamp the MSB so that we preserve the semantics of the
2292 // original operations.
2293 ClampMSB = true;
2294 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
 2295 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
 2296 SrlImm)) {
2297 // If the shift result was truncated, we can still combine them.
2298 Opd0 = Op0->getOperand(0).getOperand(0);
2299
2300 // Use the type of SRL node.
2301 VT = Opd0->getValueType(0);
2302 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, SrlImm)) {
2303 Opd0 = Op0->getOperand(0);
2304 ClampMSB = (VT == MVT::i32);
2305 } else if (BiggerPattern) {
2306 // Let's pretend a 0 shift right has been performed.
2307 // The resulting code will be at least as good as the original one
2308 // plus it may expose more opportunities for bitfield insert pattern.
2309 // FIXME: Currently we limit this to the bigger pattern, because
2310 // some optimizations expect AND and not UBFM.
2311 Opd0 = N->getOperand(0);
2312 } else
2313 return false;
2314
2315 // Bail out on large immediates. This happens when no proper
2316 // combining/constant folding was performed.
2317 if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
2318 LLVM_DEBUG(
2319 (dbgs() << N
2320 << ": Found large shift immediate, this should not happen\n"));
2321 return false;
2322 }
2323
2324 LSB = SrlImm;
2325 MSB = SrlImm +
2326 (VT == MVT::i32 ? llvm::countr_one<uint32_t>(AndImm)
2327 : llvm::countr_one<uint64_t>(AndImm)) -
2328 1;
2329 if (ClampMSB)
2330 // Since we're moving the extend before the right shift operation, we need
2331 // to clamp the MSB to make sure we don't shift in undefined bits instead of
2332 // the zeros which would get shifted in with the original right shift
2333 // operation.
2334 MSB = MSB > 31 ? 31 : MSB;
2335
2336 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2337 return true;
2338}
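// Worked example: for (and (srl x, 3), 0xff) with x : i64, AndImm == 0xff is
// a mask of the low bits and SrlImm == 3, so LSB == 3 and
// MSB == 3 + 8 - 1 == 10, selecting AArch64::UBFMXri with immr #3 and
// imms #10 (equivalent to UBFX #3, #8).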
2339
2340static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
2341 SDValue &Opd0, unsigned &Immr,
2342 unsigned &Imms) {
2343 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
2344
2345 EVT VT = N->getValueType(0);
2346 unsigned BitWidth = VT.getSizeInBits();
2347 assert((VT == MVT::i32 || VT == MVT::i64) &&
2348 "Type checking must have been done before calling this function");
2349
2350 SDValue Op = N->getOperand(0);
2351 if (Op->getOpcode() == ISD::TRUNCATE) {
2352 Op = Op->getOperand(0);
2353 VT = Op->getValueType(0);
2354 BitWidth = VT.getSizeInBits();
2355 }
2356
2357 uint64_t ShiftImm;
2358 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRL, ShiftImm) &&
2359 !isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
2360 return false;
2361
2362 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
2363 if (ShiftImm + Width > BitWidth)
2364 return false;
2365
2366 Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
2367 Opd0 = Op.getOperand(0);
2368 Immr = ShiftImm;
2369 Imms = ShiftImm + Width - 1;
2370 return true;
2371}
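// Worked example: for sign_extend_inreg (srl x, 8), i8 with x : i32,
// ShiftImm == 8 and Width == 8, giving Immr == 8 and Imms == 15 with
// AArch64::SBFMWri, i.e. SBFX w, #8, #8.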
2372
2373static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
2374 SDValue &Opd0, unsigned &LSB,
2375 unsigned &MSB) {
2376 // We are looking for the following pattern which basically extracts several
 2377 // contiguous bits from the source value and places them at the LSB of the
 2378 // destination value; all other bits of the destination value are set to zero:
2379 //
2380 // Value2 = AND Value, MaskImm
2381 // SRL Value2, ShiftImm
2382 //
2383 // with MaskImm >> ShiftImm to search for the bit width.
2384 //
2385 // This gets selected into a single UBFM:
2386 //
2387 // UBFM Value, ShiftImm, Log2_64(MaskImm)
2388 //
2389
2390 if (N->getOpcode() != ISD::SRL)
2391 return false;
2392
2393 uint64_t AndMask = 0;
2394 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
2395 return false;
2396
2397 Opd0 = N->getOperand(0).getOperand(0);
2398
2399 uint64_t SrlImm = 0;
2400 if (!isIntImmediate(N->getOperand(1), SrlImm))
2401 return false;
2402
2403 // Check whether we really have several bits extract here.
2404 if (!isMask_64(AndMask >> SrlImm))
2405 return false;
2406
2407 Opc = N->getValueType(0) == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2408 LSB = SrlImm;
2409 MSB = llvm::Log2_64(AndMask);
2410 return true;
2411}
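// Worked example: for (srl (and x, 0x3f80), 7), AndMask >> SrlImm == 0x7f is
// a mask, so LSB == 7 and MSB == Log2_64(0x3f80) == 13, i.e. a single
// UBFM x, #7, #13 (UBFX #7, #7).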
2412
2413static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
2414 unsigned &Immr, unsigned &Imms,
2415 bool BiggerPattern) {
2416 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
2417 "N must be a SHR/SRA operation to call this function");
2418
2419 EVT VT = N->getValueType(0);
2420
2421 // Here we can test the type of VT and return false when the type does not
2422 // match, but since it is done prior to that call in the current context
2423 // we turned that into an assert to avoid redundant code.
2424 assert((VT == MVT::i32 || VT == MVT::i64) &&
2425 "Type checking must have been done before calling this function");
2426
2427 // Check for AND + SRL doing several bits extract.
2428 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
2429 return true;
2430
2431 // We're looking for a shift of a shift.
2432 uint64_t ShlImm = 0;
2433 uint64_t TruncBits = 0;
2434 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
2435 Opd0 = N->getOperand(0).getOperand(0);
2436 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
2437 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
2438 // We are looking for a shift of truncate. Truncate from i64 to i32 could
2439 // be considered as setting high 32 bits as zero. Our strategy here is to
2440 // always generate 64bit UBFM. This consistency will help the CSE pass
2441 // later find more redundancy.
2442 Opd0 = N->getOperand(0).getOperand(0);
2443 TruncBits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
2444 VT = Opd0.getValueType();
2445 assert(VT == MVT::i64 && "the promoted type should be i64");
2446 } else if (BiggerPattern) {
2447 // Let's pretend a 0 shift left has been performed.
2448 // FIXME: Currently we limit this to the bigger pattern case,
2449 // because some optimizations expect AND and not UBFM
2450 Opd0 = N->getOperand(0);
2451 } else
2452 return false;
2453
2454 // Missing combines/constant folding may have left us with strange
2455 // constants.
2456 if (ShlImm >= VT.getSizeInBits()) {
2457 LLVM_DEBUG(
2458 (dbgs() << N
2459 << ": Found large shift immediate, this should not happen\n"));
2460 return false;
2461 }
2462
2463 uint64_t SrlImm = 0;
2464 if (!isIntImmediate(N->getOperand(1), SrlImm))
2465 return false;
2466
2467 assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
2468 "bad amount in shift node!");
2469 int immr = SrlImm - ShlImm;
2470 Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
2471 Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
2472 // SRA requires a signed extraction
2473 if (VT == MVT::i32)
2474 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
2475 else
2476 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
2477 return true;
2478}
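// Worked example: for i32 (srl (shl x, 4), 8), ShlImm == 4 and SrlImm == 8
// with no truncation, so Immr == 4 and Imms == 27, selecting AArch64::UBFMWri
// (UBFX #4, #24); an SRA root would select AArch64::SBFMWri instead.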
2479
2480bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
2481 assert(N->getOpcode() == ISD::SIGN_EXTEND);
2482
2483 EVT VT = N->getValueType(0);
2484 EVT NarrowVT = N->getOperand(0)->getValueType(0);
2485 if (VT != MVT::i64 || NarrowVT != MVT::i32)
2486 return false;
2487
2488 uint64_t ShiftImm;
2489 SDValue Op = N->getOperand(0);
2490 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
2491 return false;
2492
2493 SDLoc dl(N);
2494 // Extend the incoming operand of the shift to 64-bits.
2495 SDValue Opd0 = Widen(CurDAG, Op.getOperand(0));
2496 unsigned Immr = ShiftImm;
2497 unsigned Imms = NarrowVT.getSizeInBits() - 1;
2498 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
2499 CurDAG->getTargetConstant(Imms, dl, VT)};
2500 CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops);
2501 return true;
2502}
2503
2504static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
2505 SDValue &Opd0, unsigned &Immr, unsigned &Imms,
2506 unsigned NumberOfIgnoredLowBits = 0,
2507 bool BiggerPattern = false) {
2508 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
2509 return false;
2510
2511 switch (N->getOpcode()) {
2512 default:
2513 if (!N->isMachineOpcode())
2514 return false;
2515 break;
2516 case ISD::AND:
2517 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
2518 NumberOfIgnoredLowBits, BiggerPattern);
2519 case ISD::SRL:
2520 case ISD::SRA:
2521 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
2522
 2523 case ISD::SIGN_EXTEND_INREG:
 2524 return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
2525 }
2526
2527 unsigned NOpc = N->getMachineOpcode();
2528 switch (NOpc) {
2529 default:
2530 return false;
2531 case AArch64::SBFMWri:
2532 case AArch64::UBFMWri:
2533 case AArch64::SBFMXri:
2534 case AArch64::UBFMXri:
2535 Opc = NOpc;
2536 Opd0 = N->getOperand(0);
2537 Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
2538 Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
2539 return true;
2540 }
2541 // Unreachable
2542 return false;
2543}
2544
2545bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
2546 unsigned Opc, Immr, Imms;
2547 SDValue Opd0;
2548 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
2549 return false;
2550
2551 EVT VT = N->getValueType(0);
2552 SDLoc dl(N);
2553
2554 // If the bit extract operation is 64bit but the original type is 32bit, we
2555 // need to add one EXTRACT_SUBREG.
2556 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
2557 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
2558 CurDAG->getTargetConstant(Imms, dl, MVT::i64)};
2559
2560 SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
2561 SDValue Inner = CurDAG->getTargetExtractSubreg(AArch64::sub_32, dl,
2562 MVT::i32, SDValue(BFM, 0));
2563 ReplaceNode(N, Inner.getNode());
2564 return true;
2565 }
2566
2567 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
2568 CurDAG->getTargetConstant(Imms, dl, VT)};
2569 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2570 return true;
2571}
2572
2573/// Does DstMask form a complementary pair with the mask provided by
2574/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
2575/// this asks whether DstMask zeroes precisely those bits that will be set by
2576/// the other half.
2577static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
2578 unsigned NumberOfIgnoredHighBits, EVT VT) {
2579 assert((VT == MVT::i32 || VT == MVT::i64) &&
2580 "i32 or i64 mask type expected!");
2581 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
2582
2583 APInt SignificantDstMask = APInt(BitWidth, DstMask);
2584 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
2585
2586 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
2587 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnes();
2588}
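// Worked example: for i32 with no ignored high bits, DstMask == 0xffff0000
// and BitsToBeInserted == 0x0000ffff form a complementary pair (their AND is
// zero and their OR is all ones), so the pair is suitable for BFI.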
2589
2590// Look for bits that will be useful for later uses.
 2591// A bit is considered useless as soon as it is dropped and never used
 2592// before it has been dropped.
 2593// E.g., looking for the useful bits of x in:
2594// 1. y = x & 0x7
2595// 2. z = y >> 2
 2596// After #1, the useful bits of x are 0x7; these useful bits then live through
 2597// y.
2598// After #2, the useful bits of x are 0x4.
 2599// However, if x is used by an unpredictable instruction, then all its bits
2600// are useful.
2601// E.g.
2602// 1. y = x & 0x7
2603// 2. z = y >> 2
2604// 3. str x, [@x]
2605static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
2606
 2607static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
 2608 unsigned Depth) {
2609 uint64_t Imm =
2610 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
2611 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
2612 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
2613 getUsefulBits(Op, UsefulBits, Depth + 1);
2614}
2615
 2616static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
 2617 uint64_t Imm, uint64_t MSB,
2618 unsigned Depth) {
2619 // inherit the bitwidth value
2620 APInt OpUsefulBits(UsefulBits);
2621 OpUsefulBits = 1;
2622
2623 if (MSB >= Imm) {
2624 OpUsefulBits <<= MSB - Imm + 1;
2625 --OpUsefulBits;
2626 // The interesting part will be in the lower part of the result
2627 getUsefulBits(Op, OpUsefulBits, Depth + 1);
2628 // The interesting part was starting at Imm in the argument
2629 OpUsefulBits <<= Imm;
2630 } else {
2631 OpUsefulBits <<= MSB + 1;
2632 --OpUsefulBits;
2633 // The interesting part will be shifted in the result
2634 OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
2635 getUsefulBits(Op, OpUsefulBits, Depth + 1);
2636 // The interesting part was at zero in the argument
2637 OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
2638 }
2639
2640 UsefulBits &= OpUsefulBits;
2641}
2642
2643static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
2644 unsigned Depth) {
2645 uint64_t Imm =
2646 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
2647 uint64_t MSB =
2648 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2649
2650 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
2651}
2652
 2653static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
 2654 unsigned Depth) {
2655 uint64_t ShiftTypeAndValue =
2656 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2657 APInt Mask(UsefulBits);
2658 Mask.clearAllBits();
2659 Mask.flipAllBits();
2660
2661 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
2662 // Shift Left
2663 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
2664 Mask <<= ShiftAmt;
2665 getUsefulBits(Op, Mask, Depth + 1);
2666 Mask.lshrInPlace(ShiftAmt);
2667 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
2668 // Shift Right
2669 // We do not handle AArch64_AM::ASR, because the sign will change the
2670 // number of useful bits
2671 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
2672 Mask.lshrInPlace(ShiftAmt);
2673 getUsefulBits(Op, Mask, Depth + 1);
2674 Mask <<= ShiftAmt;
2675 } else
2676 return;
2677
2678 UsefulBits &= Mask;
2679}
2680
2681static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
2682 unsigned Depth) {
2683 uint64_t Imm =
2684 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2685 uint64_t MSB =
2686 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
2687
2688 APInt OpUsefulBits(UsefulBits);
2689 OpUsefulBits = 1;
2690
2691 APInt ResultUsefulBits(UsefulBits.getBitWidth(), 0);
2692 ResultUsefulBits.flipAllBits();
2693 APInt Mask(UsefulBits.getBitWidth(), 0);
2694
2695 getUsefulBits(Op, ResultUsefulBits, Depth + 1);
2696
2697 if (MSB >= Imm) {
2698 // The instruction is a BFXIL.
2699 uint64_t Width = MSB - Imm + 1;
2700 uint64_t LSB = Imm;
2701
2702 OpUsefulBits <<= Width;
2703 --OpUsefulBits;
2704
2705 if (Op.getOperand(1) == Orig) {
2706 // Copy the low bits from the result to bits starting from LSB.
2707 Mask = ResultUsefulBits & OpUsefulBits;
2708 Mask <<= LSB;
2709 }
2710
2711 if (Op.getOperand(0) == Orig)
2712 // Bits starting from LSB in the input contribute to the result.
2713 Mask |= (ResultUsefulBits & ~OpUsefulBits);
2714 } else {
2715 // The instruction is a BFI.
2716 uint64_t Width = MSB + 1;
2717 uint64_t LSB = UsefulBits.getBitWidth() - Imm;
2718
2719 OpUsefulBits <<= Width;
2720 --OpUsefulBits;
2721 OpUsefulBits <<= LSB;
2722
2723 if (Op.getOperand(1) == Orig) {
2724 // Copy the bits from the result to the zero bits.
2725 Mask = ResultUsefulBits & OpUsefulBits;
2726 Mask.lshrInPlace(LSB);
2727 }
2728
2729 if (Op.getOperand(0) == Orig)
2730 Mask |= (ResultUsefulBits & ~OpUsefulBits);
2731 }
2732
2733 UsefulBits &= Mask;
2734}
2735
2736static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
2737 SDValue Orig, unsigned Depth) {
2738
2739 // Users of this node should have already been instruction selected
2740 // FIXME: Can we turn that into an assert?
2741 if (!UserNode->isMachineOpcode())
2742 return;
2743
2744 switch (UserNode->getMachineOpcode()) {
2745 default:
2746 return;
2747 case AArch64::ANDSWri:
2748 case AArch64::ANDSXri:
2749 case AArch64::ANDWri:
2750 case AArch64::ANDXri:
2751 // We increment Depth only when we call the getUsefulBits
2752 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
2753 Depth);
2754 case AArch64::UBFMWri:
2755 case AArch64::UBFMXri:
2756 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
2757
2758 case AArch64::ORRWrs:
2759 case AArch64::ORRXrs:
2760 if (UserNode->getOperand(0) != Orig && UserNode->getOperand(1) == Orig)
2761 getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
2762 Depth);
2763 return;
2764 case AArch64::BFMWri:
2765 case AArch64::BFMXri:
2766 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
2767
2768 case AArch64::STRBBui:
2769 case AArch64::STURBBi:
2770 if (UserNode->getOperand(0) != Orig)
2771 return;
2772 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
2773 return;
2774
2775 case AArch64::STRHHui:
2776 case AArch64::STURHHi:
2777 if (UserNode->getOperand(0) != Orig)
2778 return;
2779 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
2780 return;
2781 }
2782}
2783
2784static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
 2785 if (Depth >= SelectionDAG::MaxRecursionDepth)
 2786 return;
2787 // Initialize UsefulBits
2788 if (!Depth) {
2789 unsigned Bitwidth = Op.getScalarValueSizeInBits();
2790 // At the beginning, assume every produced bits is useful
2791 UsefulBits = APInt(Bitwidth, 0);
2792 UsefulBits.flipAllBits();
2793 }
2794 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
2795
2796 for (SDNode *Node : Op.getNode()->uses()) {
2797 // A use cannot produce useful bits
2798 APInt UsefulBitsForUse = APInt(UsefulBits);
2799 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
2800 UsersUsefulBits |= UsefulBitsForUse;
2801 }
2802 // UsefulBits contains the produced bits that are meaningful for the
2803 // current definition, thus a user cannot make a bit meaningful at
2804 // this point
2805 UsefulBits &= UsersUsefulBits;
2806}
2807
2808/// Create a machine node performing a notional SHL of Op by ShlAmount. If
2809/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
2810/// 0, return Op unchanged.
2811static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
2812 if (ShlAmount == 0)
2813 return Op;
2814
2815 EVT VT = Op.getValueType();
2816 SDLoc dl(Op);
2817 unsigned BitWidth = VT.getSizeInBits();
2818 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2819
2820 SDNode *ShiftNode;
2821 if (ShlAmount > 0) {
2822 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
2823 ShiftNode = CurDAG->getMachineNode(
2824 UBFMOpc, dl, VT, Op,
2825 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
2826 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
2827 } else {
2828 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
2829 assert(ShlAmount < 0 && "expected right shift");
2830 int ShrAmount = -ShlAmount;
2831 ShiftNode = CurDAG->getMachineNode(
2832 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
2833 CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
2834 }
2835
2836 return SDValue(ShiftNode, 0);
2837}
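// Worked example: for a 32-bit Op, ShlAmount == 3 emits
// UBFMWri Op, #29, #28 (the LSL #3 alias), while ShlAmount == -3 emits
// UBFMWri Op, #3, #31 (the LSR #3 alias).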
2838
2839// For bit-field-positioning pattern "(and (shl VAL, N), ShiftedMask)".
 2840static bool isBitfieldPositioningOpFromAnd(SelectionDAG *CurDAG, SDValue Op,
 2841 bool BiggerPattern,
2842 const uint64_t NonZeroBits,
2843 SDValue &Src, int &DstLSB,
2844 int &Width);
2845
 2846// For bit-field-positioning pattern "(shl VAL, N)".
 2847static bool isBitfieldPositioningOpFromShl(SelectionDAG *CurDAG, SDValue Op,
 2848 bool BiggerPattern,
2849 const uint64_t NonZeroBits,
2850 SDValue &Src, int &DstLSB,
2851 int &Width);
2852
2853/// Does this tree qualify as an attempt to move a bitfield into position,
2854/// essentially "(and (shl VAL, N), Mask)" or (shl VAL, N).
 2855static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
 2856 bool BiggerPattern, SDValue &Src,
2857 int &DstLSB, int &Width) {
2858 EVT VT = Op.getValueType();
2859 unsigned BitWidth = VT.getSizeInBits();
2860 (void)BitWidth;
2861 assert(BitWidth == 32 || BitWidth == 64);
2862
2863 KnownBits Known = CurDAG->computeKnownBits(Op);
2864
2865 // Non-zero in the sense that they're not provably zero, which is the key
2866 // point if we want to use this value
2867 const uint64_t NonZeroBits = (~Known.Zero).getZExtValue();
2868 if (!isShiftedMask_64(NonZeroBits))
2869 return false;
2870
2871 switch (Op.getOpcode()) {
2872 default:
2873 break;
2874 case ISD::AND:
2875 return isBitfieldPositioningOpFromAnd(CurDAG, Op, BiggerPattern,
2876 NonZeroBits, Src, DstLSB, Width);
2877 case ISD::SHL:
2878 return isBitfieldPositioningOpFromShl(CurDAG, Op, BiggerPattern,
2879 NonZeroBits, Src, DstLSB, Width);
2880 }
2881
2882 return false;
2883}
2884
 2885static bool isBitfieldPositioningOpFromAnd(SelectionDAG *CurDAG, SDValue Op,
 2886 bool BiggerPattern,
2887 const uint64_t NonZeroBits,
2888 SDValue &Src, int &DstLSB,
2889 int &Width) {
2890 assert(isShiftedMask_64(NonZeroBits) && "Caller guaranteed");
2891
2892 EVT VT = Op.getValueType();
2893 assert((VT == MVT::i32 || VT == MVT::i64) &&
2894 "Caller guarantees VT is one of i32 or i64");
2895 (void)VT;
2896
2897 uint64_t AndImm;
2898 if (!isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm))
2899 return false;
2900
2901 // If (~AndImm & NonZeroBits) is not zero at POS, we know that
2902 // 1) (AndImm & (1 << POS) == 0)
2903 // 2) the result of AND is not zero at POS bit (according to NonZeroBits)
2904 //
2905 // 1) and 2) don't agree so something must be wrong (e.g., in
2906 // 'SelectionDAG::computeKnownBits')
2907 assert((~AndImm & NonZeroBits) == 0 &&
2908 "Something must be wrong (e.g., in SelectionDAG::computeKnownBits)");
2909
2910 SDValue AndOp0 = Op.getOperand(0);
2911
2912 uint64_t ShlImm;
2913 SDValue ShlOp0;
2914 if (isOpcWithIntImmediate(AndOp0.getNode(), ISD::SHL, ShlImm)) {
2915 // For pattern "and(shl(val, N), shifted-mask)", 'ShlOp0' is set to 'val'.
2916 ShlOp0 = AndOp0.getOperand(0);
2917 } else if (VT == MVT::i64 && AndOp0.getOpcode() == ISD::ANY_EXTEND &&
 2918 isOpcWithIntImmediate(AndOp0.getOperand(0).getNode(), ISD::SHL,
 2919 ShlImm)) {
2920 // For pattern "and(any_extend(shl(val, N)), shifted-mask)"
2921
2922 // ShlVal == shl(val, N), which is a left shift on a smaller type.
2923 SDValue ShlVal = AndOp0.getOperand(0);
2924
2925 // Since this is after type legalization and ShlVal is extended to MVT::i64,
2926 // expect VT to be MVT::i32.
2927 assert((ShlVal.getValueType() == MVT::i32) && "Expect VT to be MVT::i32.");
2928
2929 // Widens 'val' to MVT::i64 as the source of bit field positioning.
2930 ShlOp0 = Widen(CurDAG, ShlVal.getOperand(0));
2931 } else
2932 return false;
2933
2934 // For !BiggerPattern, bail out if the AndOp0 has more than one use, since
2935 // then we'll end up generating AndOp0+UBFIZ instead of just keeping
2936 // AndOp0+AND.
2937 if (!BiggerPattern && !AndOp0.hasOneUse())
2938 return false;
2939
2940 DstLSB = llvm::countr_zero(NonZeroBits);
2941 Width = llvm::countr_one(NonZeroBits >> DstLSB);
2942
2943 // Bail out on large Width. This happens when no proper combining / constant
2944 // folding was performed.
2945 if (Width >= (int)VT.getSizeInBits()) {
 2946 // If VT is i64, Width > 64 is not possible since NonZeroBits is uint64_t, and
2947 // Width == 64 indicates a missed dag-combine from "(and val, AllOnes)" to
2948 // "val".
2949 // If VT is i32, what Width >= 32 means:
 2950 // - For "(and (any_extend(shl val, N)), shifted-mask)", the `and` Op
 2951 // demands at least 'Width' bits (after dag-combiner). This, together with the
 2952 // `any_extend` Op (undefined higher bits), indicates a missed combination
 2953 // when lowering the 'and' IR instruction to a machine IR instruction.
2954 LLVM_DEBUG(
2955 dbgs()
2956 << "Found large Width in bit-field-positioning -- this indicates no "
2957 "proper combining / constant folding was performed\n");
2958 return false;
2959 }
2960
2961 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
2962 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
2963 // amount. BiggerPattern is true when this pattern is being matched for BFI,
2964 // BiggerPattern is false when this pattern is being matched for UBFIZ, in
2965 // which case it is not profitable to insert an extra shift.
2966 if (ShlImm != uint64_t(DstLSB) && !BiggerPattern)
2967 return false;
2968
2969 Src = getLeftShift(CurDAG, ShlOp0, ShlImm - DstLSB);
2970 return true;
2971}
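// Worked example: for (and (shl x, 3), 0xf8), assuming nothing more is known
// about x, NonZeroBits == 0xf8, so DstLSB == 3 and Width == 5; since ShlImm
// equals DstLSB no extra shift is inserted and Src is simply x.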
2972
 2973// For node (shl (and val, mask), N), returns true if the node is equivalent to
 2974// UBFIZ.
 2975static bool isSeveralBitsPositioningOpFromShl(const uint64_t ShlImm, SDValue Op,
 2976 SDValue &Src, int &DstLSB,
2977 int &Width) {
2978 // Caller should have verified that N is a left shift with constant shift
2979 // amount; asserts that.
2980 assert(Op.getOpcode() == ISD::SHL &&
2981 "Op.getNode() should be a SHL node to call this function");
2982 assert(isIntImmediateEq(Op.getOperand(1), ShlImm) &&
2983 "Op.getNode() should shift ShlImm to call this function");
2984
2985 uint64_t AndImm = 0;
2986 SDValue Op0 = Op.getOperand(0);
2987 if (!isOpcWithIntImmediate(Op0.getNode(), ISD::AND, AndImm))
2988 return false;
2989
2990 const uint64_t ShiftedAndImm = ((AndImm << ShlImm) >> ShlImm);
2991 if (isMask_64(ShiftedAndImm)) {
2992 // AndImm is a superset of (AllOnes >> ShlImm); in other words, AndImm
2993 // should end with Mask, and could be prefixed with random bits if those
2994 // bits are shifted out.
2995 //
2996 // For example, xyz11111 (with {x,y,z} being 0 or 1) is fine if ShlImm >= 3;
2997 // the AND result corresponding to those bits are shifted out, so it's fine
2998 // to not extract them.
2999 Width = llvm::countr_one(ShiftedAndImm);
3000 DstLSB = ShlImm;
3001 Src = Op0.getOperand(0);
3002 return true;
3003 }
3004 return false;
3005}
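// Worked example: for (shl (and x, 0xff), 8), ShiftedAndImm == 0xff is a
// mask, so Width == 8, DstLSB == 8 and Src == x, i.e. the node behaves like
// UBFIZ x, #8, #8.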
3006
 3007static bool isBitfieldPositioningOpFromShl(SelectionDAG *CurDAG, SDValue Op,
 3008 bool BiggerPattern,
3009 const uint64_t NonZeroBits,
3010 SDValue &Src, int &DstLSB,
3011 int &Width) {
3012 assert(isShiftedMask_64(NonZeroBits) && "Caller guaranteed");
3013
3014 EVT VT = Op.getValueType();
3015 assert((VT == MVT::i32 || VT == MVT::i64) &&
3016 "Caller guarantees that type is i32 or i64");
3017 (void)VT;
3018
3019 uint64_t ShlImm;
3020 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
3021 return false;
3022
3023 if (!BiggerPattern && !Op.hasOneUse())
3024 return false;
3025
3026 if (isSeveralBitsPositioningOpFromShl(ShlImm, Op, Src, DstLSB, Width))
3027 return true;
3028
3029 DstLSB = llvm::countr_zero(NonZeroBits);
3030 Width = llvm::countr_one(NonZeroBits >> DstLSB);
3031
3032 if (ShlImm != uint64_t(DstLSB) && !BiggerPattern)
3033 return false;
3034
3035 Src = getLeftShift(CurDAG, Op.getOperand(0), ShlImm - DstLSB);
3036 return true;
3037}
3038
3039static bool isShiftedMask(uint64_t Mask, EVT VT) {
3040 assert(VT == MVT::i32 || VT == MVT::i64);
3041 if (VT == MVT::i32)
3042 return isShiftedMask_32(Mask);
3043 return isShiftedMask_64(Mask);
3044}
3045
3046// Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
3047// inserted only sets known zero bits.
 3048static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
 3049 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
3050
3051 EVT VT = N->getValueType(0);
3052 if (VT != MVT::i32 && VT != MVT::i64)
3053 return false;
3054
3055 unsigned BitWidth = VT.getSizeInBits();
3056
3057 uint64_t OrImm;
3058 if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))
3059 return false;
3060
3061 // Skip this transformation if the ORR immediate can be encoded in the ORR.
3062 // Otherwise, we'll trade an AND+ORR for ORR+BFI/BFXIL, which is most likely
3063 // performance neutral.
 3064 if (AArch64_AM::isLogicalImmediate(OrImm, BitWidth))
 3065 return false;
3066
3067 uint64_t MaskImm;
3068 SDValue And = N->getOperand(0);
3069 // Must be a single use AND with an immediate operand.
3070 if (!And.hasOneUse() ||
3071 !isOpcWithIntImmediate(And.getNode(), ISD::AND, MaskImm))
3072 return false;
3073
3074 // Compute the Known Zero for the AND as this allows us to catch more general
3075 // cases than just looking for AND with imm.
3076 KnownBits Known = CurDAG->computeKnownBits(And);
3077
3078 // Non-zero in the sense that they're not provably zero, which is the key
3079 // point if we want to use this value.
3080 uint64_t NotKnownZero = (~Known.Zero).getZExtValue();
3081
3082 // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
3083 if (!isShiftedMask(Known.Zero.getZExtValue(), VT))
3084 return false;
3085
3086 // The bits being inserted must only set those bits that are known to be zero.
3087 if ((OrImm & NotKnownZero) != 0) {
3088 // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
3089 // currently handle this case.
3090 return false;
3091 }
3092
3093 // BFI/BFXIL dst, src, #lsb, #width.
3094 int LSB = llvm::countr_one(NotKnownZero);
3095 int Width = BitWidth - APInt(BitWidth, NotKnownZero).popcount();
3096
3097 // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
3098 unsigned ImmR = (BitWidth - LSB) % BitWidth;
3099 unsigned ImmS = Width - 1;
3100
3101 // If we're creating a BFI instruction avoid cases where we need more
3102 // instructions to materialize the BFI constant as compared to the original
3103 // ORR. A BFXIL will use the same constant as the original ORR, so the code
3104 // should be no worse in this case.
3105 bool IsBFI = LSB != 0;
3106 uint64_t BFIImm = OrImm >> LSB;
3107 if (IsBFI && !AArch64_AM::isLogicalImmediate(BFIImm, BitWidth)) {
3108 // We have a BFI instruction and we know the constant can't be materialized
3109 // with a ORR-immediate with the zero register.
3110 unsigned OrChunks = 0, BFIChunks = 0;
3111 for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
3112 if (((OrImm >> Shift) & 0xFFFF) != 0)
3113 ++OrChunks;
3114 if (((BFIImm >> Shift) & 0xFFFF) != 0)
3115 ++BFIChunks;
3116 }
3117 if (BFIChunks > OrChunks)
3118 return false;
3119 }
3120
3121 // Materialize the constant to be inserted.
3122 SDLoc DL(N);
3123 unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
3124 SDNode *MOVI = CurDAG->getMachineNode(
3125 MOVIOpc, DL, VT, CurDAG->getTargetConstant(BFIImm, DL, VT));
3126
3127 // Create the BFI/BFXIL instruction.
3128 SDValue Ops[] = {And.getOperand(0), SDValue(MOVI, 0),
3129 CurDAG->getTargetConstant(ImmR, DL, VT),
3130 CurDAG->getTargetConstant(ImmS, DL, VT)};
3131 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
3132 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
3133 return true;
3134}
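// Worked example: for i32 (or (and x, 0xfffffff0), 0xa), assuming the AND has
// a single use and nothing more is known about x, Known.Zero is the shifted
// mask 0xf, so LSB == 0 and Width == 4 (ImmR == 0, ImmS == 3); 0xa is
// materialized with MOVi32imm and its low 4 bits are inserted with BFMWri
// (the BFXIL #0, #4 alias).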
3135
 3136static bool isWorthFoldingIntoOrrWithShift(SDValue Dst, SelectionDAG *CurDAG,
 3137 SDValue &ShiftedOperand,
3138 uint64_t &EncodedShiftImm) {
3139 // Avoid folding Dst into ORR-with-shift if Dst has other uses than ORR.
3140 if (!Dst.hasOneUse())
3141 return false;
3142
3143 EVT VT = Dst.getValueType();
3144 assert((VT == MVT::i32 || VT == MVT::i64) &&
3145 "Caller should guarantee that VT is one of i32 or i64");
3146 const unsigned SizeInBits = VT.getSizeInBits();
3147
3148 SDLoc DL(Dst.getNode());
3149 uint64_t AndImm, ShlImm;
3150 if (isOpcWithIntImmediate(Dst.getNode(), ISD::AND, AndImm) &&
3151 isShiftedMask_64(AndImm)) {
3152 // Avoid transforming 'DstOp0' if it has other uses than the AND node.
3153 SDValue DstOp0 = Dst.getOperand(0);
3154 if (!DstOp0.hasOneUse())
3155 return false;
3156
3157 // An example to illustrate the transformation
3158 // From:
3159 // lsr x8, x1, #1
3160 // and x8, x8, #0x3f80
3161 // bfxil x8, x1, #0, #7
3162 // To:
3163 // and x8, x23, #0x7f
3164 // ubfx x9, x23, #8, #7
3165 // orr x23, x8, x9, lsl #7
3166 //
3167 // The number of instructions remains the same, but ORR is faster than BFXIL
3168 // on many AArch64 processors (or as good as BFXIL if not faster). Besides,
3169 // the dependency chain is improved after the transformation.
3170 uint64_t SrlImm;
3171 if (isOpcWithIntImmediate(DstOp0.getNode(), ISD::SRL, SrlImm)) {
3172 uint64_t NumTrailingZeroInShiftedMask = llvm::countr_zero(AndImm);
3173 if ((SrlImm + NumTrailingZeroInShiftedMask) < SizeInBits) {
3174 unsigned MaskWidth =
3175 llvm::countr_one(AndImm >> NumTrailingZeroInShiftedMask);
3176 unsigned UBFMOpc =
3177 (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
3178 SDNode *UBFMNode = CurDAG->getMachineNode(
3179 UBFMOpc, DL, VT, DstOp0.getOperand(0),
3180 CurDAG->getTargetConstant(SrlImm + NumTrailingZeroInShiftedMask, DL,
3181 VT),
3182 CurDAG->getTargetConstant(
3183 SrlImm + NumTrailingZeroInShiftedMask + MaskWidth - 1, DL, VT));
3184 ShiftedOperand = SDValue(UBFMNode, 0);
3185 EncodedShiftImm = AArch64_AM::getShifterImm(
3186 AArch64_AM::LSL, NumTrailingZeroInShiftedMask);
3187 return true;
3188 }
3189 }
3190 return false;
3191 }
3192
3193 if (isOpcWithIntImmediate(Dst.getNode(), ISD::SHL, ShlImm)) {
3194 ShiftedOperand = Dst.getOperand(0);
3195 EncodedShiftImm = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShlImm);
3196 return true;
3197 }
3198
3199 uint64_t SrlImm;
3200 if (isOpcWithIntImmediate(Dst.getNode(), ISD::SRL, SrlImm)) {
3201 ShiftedOperand = Dst.getOperand(0);
3202 EncodedShiftImm = AArch64_AM::getShifterImm(AArch64_AM::LSR, SrlImm);
3203 return true;
3204 }
3205 return false;
3206}
3207
3208// Given an 'ISD::OR' node that is going to be selected as BFM, analyze
3209// the operands and select it to AArch64::ORR with shifted registers if
3210// that's more efficient. Returns true iff selection to AArch64::ORR happens.
3211static bool tryOrrWithShift(SDNode *N, SDValue OrOpd0, SDValue OrOpd1,
3212 SDValue Src, SDValue Dst, SelectionDAG *CurDAG,
3213 const bool BiggerPattern) {
3214 EVT VT = N->getValueType(0);
3215 assert(N->getOpcode() == ISD::OR && "Expect N to be an OR node");
3216 assert(((N->getOperand(0) == OrOpd0 && N->getOperand(1) == OrOpd1) ||
3217 (N->getOperand(1) == OrOpd0 && N->getOperand(0) == OrOpd1)) &&
3218 "Expect OrOpd0 and OrOpd1 to be operands of ISD::OR");
3219 assert((VT == MVT::i32 || VT == MVT::i64) &&
3220 "Expect result type to be i32 or i64 since N is combinable to BFM");
3221 SDLoc DL(N);
3222
3223 // Bail out if BFM simplifies away one node in BFM Dst.
3224 if (OrOpd1 != Dst)
3225 return false;
3226
3227 const unsigned OrrOpc = (VT == MVT::i32) ? AArch64::ORRWrs : AArch64::ORRXrs;
3228 // For "BFM Rd, Rn, #immr, #imms", it's known that BFM simplifies away fewer
3229 // nodes from Rn (or inserts additional shift node) if BiggerPattern is true.
3230 if (BiggerPattern) {
3231 uint64_t SrcAndImm;
3232 if (isOpcWithIntImmediate(OrOpd0.getNode(), ISD::AND, SrcAndImm) &&
3233 isMask_64(SrcAndImm) && OrOpd0.getOperand(0) == Src) {
3234 // OrOpd0 = AND Src, #Mask
3235 // So BFM simplifies away one AND node from Src and doesn't simplify away
3236 // nodes from Dst. If ORR with left-shifted operand also simplifies away
3237 // one node (from Rd), ORR is better since it has higher throughput and
3238 // smaller latency than BFM on many AArch64 processors (and for the rest
3239 // ORR is at least as good as BFM).
3240 SDValue ShiftedOperand;
3241 uint64_t EncodedShiftImm;
3242 if (isWorthFoldingIntoOrrWithShift(Dst, CurDAG, ShiftedOperand,
3243 EncodedShiftImm)) {
3244 SDValue Ops[] = {OrOpd0, ShiftedOperand,
3245 CurDAG->getTargetConstant(EncodedShiftImm, DL, VT)};
3246 CurDAG->SelectNodeTo(N, OrrOpc, VT, Ops);
3247 return true;
3248 }
3249 }
3250 return false;
3251 }
3252
3253 assert((!BiggerPattern) && "BiggerPattern should be handled above");
3254
3255 uint64_t ShlImm;
3256 if (isOpcWithIntImmediate(OrOpd0.getNode(), ISD::SHL, ShlImm)) {
3257 if (OrOpd0.getOperand(0) == Src && OrOpd0.hasOneUse()) {
3258 SDValue Ops[] = {
3259 Dst, Src,
3260 CurDAG->getTargetConstant(
3261 AArch64_AM::getShifterImm(AArch64_AM::LSL, ShlImm), DL, VT)};
3262 CurDAG->SelectNodeTo(N, OrrOpc, VT, Ops);
3263 return true;
3264 }
3265
3266 // Select the following pattern to left-shifted operand rather than BFI.
3267 // %val1 = op ..
3268 // %val2 = shl %val1, #imm
3269 // %res = or %val1, %val2
3270 //
3271 // If N is selected to be BFI, we know that
3272 // 1) OrOpd0 would be the operand from which bits are extracted (i.e., folded
3273 // into BFI), and 2) OrOpd1 would be the destination operand (i.e., preserved)
3274 //
3275 // Instead of selecting N to BFI, fold OrOpd0 as a left shift directly.
3276 if (OrOpd0.getOperand(0) == OrOpd1) {
3277 SDValue Ops[] = {
3278 OrOpd1, OrOpd1,
3279 CurDAG->getTargetConstant(
3280 AArch64_AM::getShifterImm(AArch64_AM::LSL, ShlImm), DL, VT)};
3281 CurDAG->SelectNodeTo(N, OrrOpc, VT, Ops);
3282 return true;
3283 }
3284 }
3285
3286 uint64_t SrlImm;
3287 if (isOpcWithIntImmediate(OrOpd0.getNode(), ISD::SRL, SrlImm)) {
3288 // Select the following pattern to right-shifted operand rather than BFXIL.
3289 // %val1 = op ..
3290 // %val2 = lshr %val1, #imm
3291 // %res = or %val1, %val2
3292 //
3293 // If N is selected to be BFXIL, we know that
3294 // 1) OrOpd0 would be the operand from which bits are extracted (i.e., folded
3295 // into BFXIL), and 2) OrOpd1 would be the destination operand (i.e., preserved)
3296 //
3297 // Instead of selecting N to BFXIL, fold OrOpd0 as a right shift directly.
3298 if (OrOpd0.getOperand(0) == OrOpd1) {
3299 SDValue Ops[] = {
3300 OrOpd1, OrOpd1,
3301 CurDAG->getTargetConstant(
3302 AArch64_AM::getShifterImm(AArch64_AM::LSR, SrlImm), DL, VT)};
3303 CurDAG->SelectNodeTo(N, OrrOpc, VT, Ops);
3304 return true;
3305 }
3306 }
3307
3308 return false;
3309}
3310
3311static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
3312 SelectionDAG *CurDAG) {
3313 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
3314
3315 EVT VT = N->getValueType(0);
3316 if (VT != MVT::i32 && VT != MVT::i64)
3317 return false;
3318
3319 unsigned BitWidth = VT.getSizeInBits();
3320
3321 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
3322 // have the expected shape. Try to undo that.
3323
3324 unsigned NumberOfIgnoredLowBits = UsefulBits.countr_zero();
3325 unsigned NumberOfIgnoredHighBits = UsefulBits.countl_zero();
3326
3327 // Given an OR operation, check if we have the following pattern
3328 // ubfm c, b, imm, imm2 (or something that does the same job, see
3329 // isBitfieldExtractOp)
3330 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
3331 // countTrailingZeros(mask2) == imm2 - imm + 1
3332 // f = d | c
3333 // if yes, replace the OR instruction with:
3334 // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2
3335
3336 // OR is commutative, check all combinations of operand order and values of
3337 // BiggerPattern, i.e.
3338 // Opd0, Opd1, BiggerPattern=false
3339 // Opd1, Opd0, BiggerPattern=false
3340 // Opd0, Opd1, BiggerPattern=true
3341 // Opd1, Opd0, BiggerPattern=true
3342 // Several of these combinations may match, so check with BiggerPattern=false
3343 // first since that will produce better results by matching more instructions
3344 // and/or inserting fewer extra instructions.
3345 for (int I = 0; I < 4; ++I) {
3346
3347 SDValue Dst, Src;
3348 unsigned ImmR, ImmS;
3349 bool BiggerPattern = I / 2;
3350 SDValue OrOpd0Val = N->getOperand(I % 2);
3351 SDNode *OrOpd0 = OrOpd0Val.getNode();
3352 SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
3353 SDNode *OrOpd1 = OrOpd1Val.getNode();
3354
3355 unsigned BFXOpc;
3356 int DstLSB, Width;
3357 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
3358 NumberOfIgnoredLowBits, BiggerPattern)) {
3359 // Check that the returned opcode is compatible with the pattern,
3360 // i.e., same type and zero extended (U and not S)
3361 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
3362 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
3363 continue;
3364
3365 // Compute the width of the bitfield insertion
3366 DstLSB = 0;
3367 Width = ImmS - ImmR + 1;
3368 // FIXME: This constraint is to catch bitfield insertion; we may
3369 // want to widen the pattern if we want to grab the general bitfield
3370 // move case.
3371 if (Width <= 0)
3372 continue;
3373
3374 // If the mask on the insertee is correct, we have a BFXIL operation. We
3375 // can share the ImmR and ImmS values from the already-computed UBFM.
3376 } else if (isBitfieldPositioningOp(CurDAG, OrOpd0Val,
3377 BiggerPattern,
3378 Src, DstLSB, Width)) {
3379 ImmR = (BitWidth - DstLSB) % BitWidth;
3380 ImmS = Width - 1;
3381 } else
3382 continue;
3383
3384 // Check the second part of the pattern
3385 EVT VT = OrOpd1Val.getValueType();
3386 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
3387
3388 // Compute the Known Zero for the candidate of the first operand.
3389 // This allows us to catch more general cases than just looking for
3390 // an AND with an immediate. Indeed, simplify-demanded-bits may have removed
3391 // the AND instruction because it proved it was useless.
3392 KnownBits Known = CurDAG->computeKnownBits(OrOpd1Val);
3393
3394 // Check if there is enough room for the second operand to appear
3395 // in the first one
3396 APInt BitsToBeInserted =
3397 APInt::getBitsSet(Known.getBitWidth(), DstLSB, DstLSB + Width);
3398
3399 if ((BitsToBeInserted & ~Known.Zero) != 0)
3400 continue;
3401
3402 // Set the first operand
3403 uint64_t Imm;
3404 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
3405 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
3406 // In that case, we can eliminate the AND
3407 Dst = OrOpd1->getOperand(0);
3408 else
3409 // Maybe the AND has been removed by simplify-demanded-bits
3410 // or is useful because it discards more bits
3411 Dst = OrOpd1Val;
3412
3413 // Before selecting ISD::OR node to AArch64::BFM, see if an AArch64::ORR
3414 // with shifted operand is more efficient.
3415 if (tryOrrWithShift(N, OrOpd0Val, OrOpd1Val, Src, Dst, CurDAG,
3416 BiggerPattern))
3417 return true;
3418
3419 // both parts match
3420 SDLoc DL(N);
3421 SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
3422 CurDAG->getTargetConstant(ImmS, DL, VT)};
3423 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
3424 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
3425 return true;
3426 }
3427
3428 // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
3429 // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
3430 // mask (e.g., 0x000ffff0).
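 // For example (values chosen for illustration): with Mask1Imm = 0x000ffff0
 // (the shifted mask) and Mask0Imm = ~Mask1Imm = 0xfff0000f on i32, we get
 // LSB = 4 and Width = 32 - popcount(Mask0Imm) = 16, so bits [19:4] of Y are
 // shifted down and inserted into X by the BFM generated below.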
3431 uint64_t Mask0Imm, Mask1Imm;
3432 SDValue And0 = N->getOperand(0);
3433 SDValue And1 = N->getOperand(1);
3434 if (And0.hasOneUse() && And1.hasOneUse() &&
3435 isOpcWithIntImmediate(And0.getNode(), ISD::AND, Mask0Imm) &&
3436 isOpcWithIntImmediate(And1.getNode(), ISD::AND, Mask1Imm) &&
3437 APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
3438 (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {
3439
3440 // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
3441 // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
3442 // bits to be inserted.
3443 if (isShiftedMask(Mask0Imm, VT)) {
3444 std::swap(And0, And1);
3445 std::swap(Mask0Imm, Mask1Imm);
3446 }
3447
3448 SDValue Src = And1->getOperand(0);
3449 SDValue Dst = And0->getOperand(0);
3450 unsigned LSB = llvm::countr_zero(Mask1Imm);
3451 int Width = BitWidth - APInt(BitWidth, Mask0Imm).popcount();
3452
3453 // The BFXIL inserts the low-order bits from a source register, so right
3454 // shift the needed bits into place.
3455 SDLoc DL(N);
3456 unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
3457 uint64_t LsrImm = LSB;
3458 if (Src->hasOneUse() &&
3459 isOpcWithIntImmediate(Src.getNode(), ISD::SRL, LsrImm) &&
3460 (LsrImm + LSB) < BitWidth) {
3461 Src = Src->getOperand(0);
3462 LsrImm += LSB;
3463 }
3464
3465 SDNode *LSR = CurDAG->getMachineNode(
3466 ShiftOpc, DL, VT, Src, CurDAG->getTargetConstant(LsrImm, DL, VT),
3467 CurDAG->getTargetConstant(BitWidth - 1, DL, VT));
3468
3469 // BFXIL is an alias of BFM, so translate to BFM operands.
3470 unsigned ImmR = (BitWidth - LSB) % BitWidth;
3471 unsigned ImmS = Width - 1;
3472
3473 // Create the BFXIL instruction.
3474 SDValue Ops[] = {Dst, SDValue(LSR, 0),
3475 CurDAG->getTargetConstant(ImmR, DL, VT),
3476 CurDAG->getTargetConstant(ImmS, DL, VT)};
3477 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
3478 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
3479 return true;
3480 }
3481
3482 return false;
3483}
3484
3485bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
3486 if (N->getOpcode() != ISD::OR)
3487 return false;
3488
3489 APInt NUsefulBits;
3490 getUsefulBits(SDValue(N, 0), NUsefulBits);
3491
3492 // If none of the bits are useful, just return UNDEF.
3493 if (!NUsefulBits) {
3494 CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
3495 return true;
3496 }
3497
3498 if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))
3499 return true;
3500
3501 return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
3502}
3503
3504/// tryBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
3505/// equivalent of a left shift by a constant amount followed by an AND masking
3506/// out a contiguous set of bits.
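/// For example (values chosen for illustration): on i32, (x << 3) & 0xf8 has
/// DstLSB = 3 and Width = 5, giving ImmR = (32 - 3) % 32 = 29 and
/// ImmS = 5 - 1 = 4, i.e. the UBFM encoding of "ubfiz w0, w0, #3, #5".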
3507bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
3508 if (N->getOpcode() != ISD::AND)
3509 return false;
3510
3511 EVT VT = N->getValueType(0);
3512 if (VT != MVT::i32 && VT != MVT::i64)
3513 return false;
3514
3515 SDValue Op0;
3516 int DstLSB, Width;
3517 if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
3518 Op0, DstLSB, Width))
3519 return false;
3520
3521 // ImmR is the rotate right amount.
3522 unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
3523 // ImmS is the most significant bit of the source to be moved.
3524 unsigned ImmS = Width - 1;
3525
3526 SDLoc DL(N);
3527 SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
3528 CurDAG->getTargetConstant(ImmS, DL, VT)};
3529 unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
3530 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
3531 return true;
3532}
3533
3534/// tryShiftAmountMod - Take advantage of built-in mod of shift amount in
3535/// variable shift/rotate instructions.
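/// For example (illustrative): because LSLV/LSRV/ASRV/RORV only read the low
/// 5 (i32) or 6 (i64) bits of the shift amount, "x1 << (x2 & 63)" can drop
/// the AND, and "x1 << (64 - x2)" can be selected as a NEG feeding LSLV.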
3536bool AArch64DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
3537 EVT VT = N->getValueType(0);
3538
3539 unsigned Opc;
3540 switch (N->getOpcode()) {
3541 case ISD::ROTR:
3542 Opc = (VT == MVT::i32) ? AArch64::RORVWr : AArch64::RORVXr;
3543 break;
3544 case ISD::SHL:
3545 Opc = (VT == MVT::i32) ? AArch64::LSLVWr : AArch64::LSLVXr;
3546 break;
3547 case ISD::SRL:
3548 Opc = (VT == MVT::i32) ? AArch64::LSRVWr : AArch64::LSRVXr;
3549 break;
3550 case ISD::SRA:
3551 Opc = (VT == MVT::i32) ? AArch64::ASRVWr : AArch64::ASRVXr;
3552 break;
3553 default:
3554 return false;
3555 }
3556
3557 uint64_t Size;
3558 uint64_t Bits;
3559 if (VT == MVT::i32) {
3560 Bits = 5;
3561 Size = 32;
3562 } else if (VT == MVT::i64) {
3563 Bits = 6;
3564 Size = 64;
3565 } else
3566 return false;
3567
3568 SDValue ShiftAmt = N->getOperand(1);
3569 SDLoc DL(N);
3570 SDValue NewShiftAmt;
3571
3572 // Skip over an extend of the shift amount.
3573 if (ShiftAmt->getOpcode() == ISD::ZERO_EXTEND ||
3574 ShiftAmt->getOpcode() == ISD::ANY_EXTEND)
3575 ShiftAmt = ShiftAmt->getOperand(0);
3576
3577 if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
3578 SDValue Add0 = ShiftAmt->getOperand(0);
3579 SDValue Add1 = ShiftAmt->getOperand(1);
3580 uint64_t Add0Imm;
3581 uint64_t Add1Imm;
3582 if (isIntImmediate(Add1, Add1Imm) && (Add1Imm % Size == 0)) {
3583 // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
3584 // to avoid the ADD/SUB.
3585 NewShiftAmt = Add0;
3586 } else if (ShiftAmt->getOpcode() == ISD::SUB &&
3587 isIntImmediate(Add0, Add0Imm) && Add0Imm != 0 &&
3588 (Add0Imm % Size == 0)) {
3589 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
3590 // to generate a NEG instead of a SUB from a constant.
3591 unsigned NegOpc;
3592 unsigned ZeroReg;
3593 EVT SubVT = ShiftAmt->getValueType(0);
3594 if (SubVT == MVT::i32) {
3595 NegOpc = AArch64::SUBWrr;
3596 ZeroReg = AArch64::WZR;
3597 } else {
3598 assert(SubVT == MVT::i64);
3599 NegOpc = AArch64::SUBXrr;
3600 ZeroReg = AArch64::XZR;
3601 }
3602 SDValue Zero =
3603 CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, ZeroReg, SubVT);
3604 MachineSDNode *Neg =
3605 CurDAG->getMachineNode(NegOpc, DL, SubVT, Zero, Add1);
3606 NewShiftAmt = SDValue(Neg, 0);
3607 } else if (ShiftAmt->getOpcode() == ISD::SUB &&
3608 isIntImmediate(Add0, Add0Imm) && (Add0Imm % Size == Size - 1)) {
3609 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
3610 // to generate a NOT instead of a SUB from a constant.
3611 unsigned NotOpc;
3612 unsigned ZeroReg;
3613 EVT SubVT = ShiftAmt->getValueType(0);
3614 if (SubVT == MVT::i32) {
3615 NotOpc = AArch64::ORNWrr;
3616 ZeroReg = AArch64::WZR;
3617 } else {
3618 assert(SubVT == MVT::i64);
3619 NotOpc = AArch64::ORNXrr;
3620 ZeroReg = AArch64::XZR;
3621 }
3622 SDValue Zero =
3623 CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, ZeroReg, SubVT);
3624 MachineSDNode *Not =
3625 CurDAG->getMachineNode(NotOpc, DL, SubVT, Zero, Add1);
3626 NewShiftAmt = SDValue(Not, 0);
3627 } else
3628 return false;
3629 } else {
3630 // If the shift amount is masked with an AND, check that the mask covers the
3631 // bits that are implicitly ANDed off by the above opcodes and if so, skip
3632 // the AND.
3633 uint64_t MaskImm;
3634 if (!isOpcWithIntImmediate(ShiftAmt.getNode(), ISD::AND, MaskImm) &&
3635 !isOpcWithIntImmediate(ShiftAmt.getNode(), AArch64ISD::ANDS, MaskImm))
3636 return false;
3637
3638 if ((unsigned)llvm::countr_one(MaskImm) < Bits)
3639 return false;
3640
3641 NewShiftAmt = ShiftAmt->getOperand(0);
3642 }
3643
3644 // Narrow/widen the shift amount to match the size of the shift operation.
3645 if (VT == MVT::i32)
3646 NewShiftAmt = narrowIfNeeded(CurDAG, NewShiftAmt);
3647 else if (VT == MVT::i64 && NewShiftAmt->getValueType(0) == MVT::i32) {
3648 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, DL, MVT::i32);
3649 MachineSDNode *Ext = CurDAG->getMachineNode(
3650 AArch64::SUBREG_TO_REG, DL, VT,
3651 CurDAG->getTargetConstant(0, DL, MVT::i64), NewShiftAmt, SubReg);
3652 NewShiftAmt = SDValue(Ext, 0);
3653 }
3654
3655 SDValue Ops[] = {N->getOperand(0), NewShiftAmt};
3656 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
3657 return true;
3658}
3659
3660 static bool checkCVTFixedPointOperandWithFBits(SelectionDAG *CurDAG, SDValue N,
3661 SDValue &FixedPos,
3662 unsigned RegWidth,
3663 bool isReciprocal) {
3664 APFloat FVal(0.0);
3665 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
3666 FVal = CN->getValueAPF();
3667 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
3668 // Some otherwise illegal constants are allowed in this case.
3669 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
3670 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
3671 return false;
3672
3673 ConstantPoolSDNode *CN =
3674 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
3675 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
3676 } else
3677 return false;
3678
3679 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
3680 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
3681 // x-register.
3682 //
3683 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
3684 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
3685 // integers.
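 // For example (values chosen for illustration): for
 // (fp_to_sint (fmul x, 65536.0)) with a 32-bit destination, THIS_NODE is
 // 65536.0 = 2^16, so IntVal is an exact power of two, FBits = 16, and the
 // whole sequence can be selected as an FCVTZS with 16 fractional bits.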
3686 bool IsExact;
3687
3688 if (isReciprocal)
3689 if (!FVal.getExactInverse(&FVal))
3690 return false;
3691
3692 // fbits is between 1 and 64 in the worst-case, which means the fmul
3693 // could have 2^64 as an actual operand. Need 65 bits of precision.
3694 APSInt IntVal(65, true);
3695 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
3696
3697 // N.b. isPowerOf2 also checks for > 0.
3698 if (!IsExact || !IntVal.isPowerOf2())
3699 return false;
3700 unsigned FBits = IntVal.logBase2();
3701
3702 // Checks above should have guaranteed that we haven't lost information in
3703 // finding FBits, but it must still be in range.
3704 if (FBits == 0 || FBits > RegWidth) return false;
3705
3706 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
3707 return true;
3708}
3709
3710bool AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
3711 unsigned RegWidth) {
3712 return checkCVTFixedPointOperandWithFBits(CurDAG, N, FixedPos, RegWidth,
3713 false);
3714}
3715
3716bool AArch64DAGToDAGISel::SelectCVTFixedPosRecipOperand(SDValue N,
3717 SDValue &FixedPos,
3718 unsigned RegWidth) {
3719 return checkCVTFixedPointOperandWithFBits(CurDAG, N, FixedPos, RegWidth,
3720 true);
3721}
3722
3723 // Inspects a register string of the form o0:op1:CRn:CRm:op2, extracts the
3724 // integer value of each field, and combines these into a single value to be
3725 // used in the MRS/MSR instruction.
3726 static int getIntOperandFromRegisterString(StringRef RegString) {
3727 SmallVector<StringRef, 5> Fields;
3728 RegString.split(Fields, ':');
3729
3730 if (Fields.size() == 1)
3731 return -1;
3732
3733 assert(Fields.size() == 5
3734 && "Invalid number of fields in read register string");
3735
3736 SmallVector<int, 5> Ops;
3737 bool AllIntFields = true;
3738
3739 for (StringRef Field : Fields) {
3740 unsigned IntField;
3741 AllIntFields &= !Field.getAsInteger(10, IntField);
3742 Ops.push_back(IntField);
3743 }
3744
3745 assert(AllIntFields &&
3746 "Unexpected non-integer value in special register string.");
3747 (void)AllIntFields;
3748
3749 // Need to combine the integer fields of the string into a single value
3750 // based on the bit encoding of MRS/MSR instruction.
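 // For example (values chosen for illustration): the string "1:2:7:14:3"
 // yields Ops = {1, 2, 7, 14, 3} and a packed value of
 // (1 << 14) | (2 << 11) | (7 << 7) | (14 << 3) | 3 = 0x53f3.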
3751 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
3752 (Ops[3] << 3) | (Ops[4]);
3753}
3754
3755// Lower the read_register intrinsic to an MRS instruction node if the special
3756// register string argument is either of the form detailed in the ACLE (the
3757// form described in getIntOperandFromRegisterString) or is a named register
3758// known by the MRS SysReg mapper.
3759bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
3760 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
3761 const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
3762 SDLoc DL(N);
3763
3764 bool ReadIs128Bit = N->getOpcode() == AArch64ISD::MRRS;
3765
3766 unsigned Opcode64Bit = AArch64::MRS;
3767 int Imm = getIntOperandFromRegisterString(RegString->getString());
3768 if (Imm == -1) {
3769 // No match, Use the sysreg mapper to map the remaining possible strings to
3770 // the value for the register to be used for the instruction operand.
3771 const auto *TheReg =
3772 AArch64SysReg::lookupSysRegByName(RegString->getString());
3773 if (TheReg && TheReg->Readable &&
3774 TheReg->haveFeatures(Subtarget->getFeatureBits()))
3775 Imm = TheReg->Encoding;
3776 else
3777 Imm = AArch64SysReg::parseGenericRegister(RegString->getString());
3778
3779 if (Imm == -1) {
3780 // Still no match, see if this is "pc" or give up.
3781 if (!ReadIs128Bit && RegString->getString() == "pc") {
3782 Opcode64Bit = AArch64::ADR;
3783 Imm = 0;
3784 } else {
3785 return false;
3786 }
3787 }
3788 }
3789
3790 SDValue InChain = N->getOperand(0);
3791 SDValue SysRegImm = CurDAG->getTargetConstant(Imm, DL, MVT::i32);
3792 if (!ReadIs128Bit) {
3793 CurDAG->SelectNodeTo(N, Opcode64Bit, MVT::i64, MVT::Other /* Chain */,
3794 {SysRegImm, InChain});
3795 } else {
3796 SDNode *MRRS = CurDAG->getMachineNode(
3797 AArch64::MRRS, DL,
3798 {MVT::Untyped /* XSeqPair */, MVT::Other /* Chain */},
3799 {SysRegImm, InChain});
3800
3801 // Sysregs are not endian. The even register always contains the low half
3802 // of the register.
3803 SDValue Lo = CurDAG->getTargetExtractSubreg(AArch64::sube64, DL, MVT::i64,
3804 SDValue(MRRS, 0));
3805 SDValue Hi = CurDAG->getTargetExtractSubreg(AArch64::subo64, DL, MVT::i64,
3806 SDValue(MRRS, 0));
3807 SDValue OutChain = SDValue(MRRS, 1);
3808
3809 ReplaceUses(SDValue(N, 0), Lo);
3810 ReplaceUses(SDValue(N, 1), Hi);
3811 ReplaceUses(SDValue(N, 2), OutChain);
3812 }
3813 return true;
3814}
3815
3816// Lower the write_register intrinsic to an MSR instruction node if the special
3817// register string argument is either of the form detailed in the ACLE (the
3818// form described in getIntOperandFromRegisterString) or is a named register
3819// known by the MSR SysReg mapper.
3820bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
3821 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
3822 const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
3823 SDLoc DL(N);
3824
3825 bool WriteIs128Bit = N->getOpcode() == AArch64ISD::MSRR;
3826
3827 if (!WriteIs128Bit) {
3828 // Check if the register was one of those allowed as the pstatefield value
3829 // in the MSR (immediate) instruction. To accept the values allowed in the
3830 // pstatefield for the MSR (immediate) instruction, we also require that an
3831 // immediate value has been provided as an argument; we know that this is
3832 // the case because it has been ensured by semantic checking.
3833 auto trySelectPState = [&](auto PMapper, unsigned State) {
3834 if (PMapper) {
3835 assert(isa<ConstantSDNode>(N->getOperand(2)) &&
3836 "Expected a constant integer expression.");
3837 unsigned Reg = PMapper->Encoding;
3838 uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
3839 CurDAG->SelectNodeTo(
3840 N, State, MVT::Other, CurDAG->getTargetConstant(Reg, DL, MVT::i32),
3841 CurDAG->getTargetConstant(Immed, DL, MVT::i16), N->getOperand(0));
3842 return true;
3843 }
3844 return false;
3845 };
3846
3847 if (trySelectPState(
3848 AArch64PState::lookupPStateImm0_15ByName(RegString->getString()),
3849 AArch64::MSRpstateImm4))
3850 return true;
3851 if (trySelectPState(
3852 AArch64PState::lookupPStateImm0_1ByName(RegString->getString()),
3853 AArch64::MSRpstateImm1))
3854 return true;
3855 }
3856
3857 int Imm = getIntOperandFromRegisterString(RegString->getString());
3858 if (Imm == -1) {
3859 // Use the sysreg mapper to attempt to map the remaining possible strings
3860 // to the value for the register to be used for the MSR (register)
3861 // instruction operand.
3862 auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
3863 if (TheReg && TheReg->Writeable &&
3864 TheReg->haveFeatures(Subtarget->getFeatureBits()))
3865 Imm = TheReg->Encoding;
3866 else
3867 Imm = AArch64SysReg::parseGenericRegister(RegString->getString());
3868
3869 if (Imm == -1)
3870 return false;
3871 }
3872
3873 SDValue InChain = N->getOperand(0);
3874 if (!WriteIs128Bit) {
3875 CurDAG->SelectNodeTo(N, AArch64::MSR, MVT::Other,
3876 CurDAG->getTargetConstant(Imm, DL, MVT::i32),
3877 N->getOperand(2), InChain);
3878 } else {
3879 // No endian swap. The lower half always goes into the even subreg, and the
3880 // higher half always into the odd subreg.
3881 SDNode *Pair = CurDAG->getMachineNode(
3882 TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped /* XSeqPair */,
3883 {CurDAG->getTargetConstant(AArch64::XSeqPairsClassRegClass.getID(), DL,
3884 MVT::i32),
3885 N->getOperand(2),
3886 CurDAG->getTargetConstant(AArch64::sube64, DL, MVT::i32),
3887 N->getOperand(3),
3888 CurDAG->getTargetConstant(AArch64::subo64, DL, MVT::i32)});
3889
3890 CurDAG->SelectNodeTo(N, AArch64::MSRR, MVT::Other,
3891 CurDAG->getTargetConstant(Imm, DL, MVT::i32),
3892 SDValue(Pair, 0), InChain);
3893 }
3894
3895 return true;
3896}
3897
3898/// We have special pseudo-instructions for these compare-and-swap operations.
3899bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
3900 unsigned Opcode;
3901 EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
3902
3903 // Leave IR for LSE if subtarget supports it.
3904 if (Subtarget->hasLSE()) return false;
3905
3906 if (MemTy == MVT::i8)
3907 Opcode = AArch64::CMP_SWAP_8;
3908 else if (MemTy == MVT::i16)
3909 Opcode = AArch64::CMP_SWAP_16;
3910 else if (MemTy == MVT::i32)
3911 Opcode = AArch64::CMP_SWAP_32;
3912 else if (MemTy == MVT::i64)
3913 Opcode = AArch64::CMP_SWAP_64;
3914 else
3915 llvm_unreachable("Unknown AtomicCmpSwap type");
3916
3917 MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
3918 SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
3919 N->getOperand(0)};
3920 SDNode *CmpSwap = CurDAG->getMachineNode(
3921 Opcode, SDLoc(N),
3922 CurDAG->getVTList(RegTy, MVT::i32, MVT::Other), Ops);
3923
3924 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
3925 CurDAG->setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
3926
3927 ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
3928 ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
3929 CurDAG->RemoveDeadNode(N);
3930
3931 return true;
3932}
3933
3934bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm,
3935 SDValue &Shift) {
3936 if (!isa<ConstantSDNode>(N))
3937 return false;
3938
3939 SDLoc DL(N);
3940 uint64_t Val = cast<ConstantSDNode>(N)
3941 ->getAPIntValue()
3942 .trunc(VT.getFixedSizeInBits())
3943 .getZExtValue();
3944
3945 switch (VT.SimpleTy) {
3946 case MVT::i8:
3947 // All immediates are supported.
3948 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3949 Imm = CurDAG->getTargetConstant(Val, DL, MVT::i32);
3950 return true;
3951 case MVT::i16:
3952 case MVT::i32:
3953 case MVT::i64:
3954 // Support 8-bit unsigned immediates.
3955 if (Val <= 255) {
3956 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3957 Imm = CurDAG->getTargetConstant(Val, DL, MVT::i32);
3958 return true;
3959 }
3960 // Support 16-bit unsigned immediates that are a multiple of 256.
3961 if (Val <= 65280 && Val % 256 == 0) {
3962 Shift = CurDAG->getTargetConstant(8, DL, MVT::i32);
3963 Imm = CurDAG->getTargetConstant(Val >> 8, DL, MVT::i32);
3964 return true;
3965 }
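 // For example (values chosen for illustration): Val = 0x1200 (4608) is
 // encoded as Imm = 0x12 with Shift = 8, i.e. the "#0x12, lsl #8" form of
 // the SVE ADD/SUB immediate.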
3966 break;
3967 default:
3968 break;
3969 }
3970
3971 return false;
3972}
3973
3974bool AArch64DAGToDAGISel::SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm,
3975 SDValue &Shift) {
3976 if (!isa<ConstantSDNode>(N))
3977 return false;
3978
3979 SDLoc DL(N);
3980 int64_t Val = cast<ConstantSDNode>(N)
3981 ->getAPIntValue()
3982 .trunc(VT.getFixedSizeInBits())
3983 .getSExtValue();
3984
3985 switch (VT.SimpleTy) {
3986 case MVT::i8:
3987 // All immediates are supported.
3988 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3989 Imm = CurDAG->getTargetConstant(Val & 0xFF, DL, MVT::i32);
3990 return true;
3991 case MVT::i16:
3992 case MVT::i32:
3993 case MVT::i64:
3994 // Support 8-bit signed immediates.
3995 if (Val >= -128 && Val <= 127) {
3996 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3997 Imm = CurDAG->getTargetConstant(Val & 0xFF, DL, MVT::i32);
3998 return true;
3999 }
4000 // Support 16-bit signed immediates that are a multiple of 256.
4001 if (Val >= -32768 && Val <= 32512 && Val % 256 == 0) {
4002 Shift = CurDAG->getTargetConstant(8, DL, MVT::i32);
4003 Imm = CurDAG->getTargetConstant((Val >> 8) & 0xFF, DL, MVT::i32);
4004 return true;
4005 }
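 // For example (values chosen for illustration): Val = -512 (0xFE00) is
 // encoded as Imm = 0xFE with Shift = 8, i.e. the "#-2, lsl #8" form of
 // CPY/DUP.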
4006 break;
4007 default:
4008 break;
4009 }
4010
4011 return false;
4012}
4013
4014bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
4015 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
4016 int64_t ImmVal = CNode->getSExtValue();
4017 SDLoc DL(N);
4018 if (ImmVal >= -128 && ImmVal < 128) {
4019 Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
4020 return true;
4021 }
4022 }
4023 return false;
4024}
4025
4026bool AArch64DAGToDAGISel::SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm) {
4027 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
4028 uint64_t ImmVal = CNode->getZExtValue();
4029
4030 switch (VT.SimpleTy) {
4031 case MVT::i8:
4032 ImmVal &= 0xFF;
4033 break;
4034 case MVT::i16:
4035 ImmVal &= 0xFFFF;
4036 break;
4037 case MVT::i32:
4038 ImmVal &= 0xFFFFFFFF;
4039 break;
4040 case MVT::i64:
4041 break;
4042 default:
4043 llvm_unreachable("Unexpected type");
4044 }
4045
4046 if (ImmVal < 256) {
4047 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
4048 return true;
4049 }
4050 }
4051 return false;
4052}
4053
4054bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm,
4055 bool Invert) {
4056 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
4057 uint64_t ImmVal = CNode->getZExtValue();
4058 SDLoc DL(N);
4059
4060 if (Invert)
4061 ImmVal = ~ImmVal;
4062
4063 // Shift mask depending on type size.
4064 switch (VT.SimpleTy) {
4065 case MVT::i8:
4066 ImmVal &= 0xFF;
4067 ImmVal |= ImmVal << 8;
4068 ImmVal |= ImmVal << 16;
4069 ImmVal |= ImmVal << 32;
4070 break;
4071 case MVT::i16:
4072 ImmVal &= 0xFFFF;
4073 ImmVal |= ImmVal << 16;
4074 ImmVal |= ImmVal << 32;
4075 break;
4076 case MVT::i32:
4077 ImmVal &= 0xFFFFFFFF;
4078 ImmVal |= ImmVal << 32;
4079 break;
4080 case MVT::i64:
4081 break;
4082 default:
4083 llvm_unreachable("Unexpected type");
4084 }
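 // For example (values chosen for illustration): an i16 element value of
 // 0x00ff is replicated to 0x00ff00ff00ff00ff before being validated and
 // encoded as a 64-bit SVE logical immediate.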
4085
4086 uint64_t encoding;
4087 if (AArch64_AM::processLogicalImmediate(ImmVal, 64, encoding)) {
4088 Imm = CurDAG->getTargetConstant(encoding, DL, MVT::i64);
4089 return true;
4090 }
4091 }
4092 return false;
4093}
4094
4095// SVE shift intrinsics allow shift amounts larger than the element's bitwidth.
4096 // Rather than attempt to normalise everything, we can sometimes saturate the
4097// shift amount during selection. This function also allows for consistent
4098// isel patterns by ensuring the resulting "Imm" node is of the i32 type
4099// required by the instructions.
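// For example (parameters chosen for illustration): with Low = 1, High = 8 and
// AllowSaturation set for an i8 element shift, an amount of 12 is clamped to 8;
// without AllowSaturation the constant is rejected and the function returns
// false.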
4100bool AArch64DAGToDAGISel::SelectSVEShiftImm(SDValue N, uint64_t Low,
4101 uint64_t High, bool AllowSaturation,
4102 SDValue &Imm) {
4103 if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
4104 uint64_t ImmVal = CN->getZExtValue();
4105
4106 // Reject shift amounts that are too small.
4107 if (ImmVal < Low)
4108 return false;
4109
4110 // Reject or saturate shift amounts that are too big.
4111 if (ImmVal > High) {
4112 if (!AllowSaturation)
4113 return false;
4114 ImmVal = High;
4115 }
4116
4117 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
4118 return true;
4119 }
4120
4121 return false;
4122}
4123
4124bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
4125 // tagp(FrameIndex, IRGstack, tag_offset):
4126 // since the offset between FrameIndex and IRGstack is a compile-time
4127 // constant, this can be lowered to a single ADDG instruction.
4128 if (!(isa<FrameIndexSDNode>(N->getOperand(1)))) {
4129 return false;
4130 }
4131
4132 SDValue IRG_SP = N->getOperand(2);
4133 if (IRG_SP->getOpcode() != ISD::INTRINSIC_W_CHAIN ||
4134 cast<ConstantSDNode>(IRG_SP->getOperand(1))->getZExtValue() !=
4135 Intrinsic::aarch64_irg_sp) {
4136 return false;
4137 }
4138
4139 const TargetLowering *TLI = getTargetLowering();
4140 SDLoc DL(N);
4141 int FI = cast<FrameIndexSDNode>(N->getOperand(1))->getIndex();
4142 SDValue FiOp = CurDAG->getTargetFrameIndex(
4143 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
4144 int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
4145
4146 SDNode *Out = CurDAG->getMachineNode(
4147 AArch64::TAGPstack, DL, MVT::i64,
4148 {FiOp, CurDAG->getTargetConstant(0, DL, MVT::i64), N->getOperand(2),
4149 CurDAG->getTargetConstant(TagOffset, DL, MVT::i64)});
4150 ReplaceNode(N, Out);
4151 return true;
4152}
4153
4154void AArch64DAGToDAGISel::SelectTagP(SDNode *N) {
4155 assert(isa<ConstantSDNode>(N->getOperand(3)) &&
4156 "llvm.aarch64.tagp third argument must be an immediate");
4157 if (trySelectStackSlotTagP(N))
4158 return;
4159 // FIXME: the above applies whenever the offset between Op1 and Op2 is a
4160 // compile-time constant, not just for stack allocations.
4161
4162 // General case for unrelated pointers in Op1 and Op2.
4163 SDLoc DL(N);
4164 int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
4165 SDNode *N1 = CurDAG->getMachineNode(AArch64::SUBP, DL, MVT::i64,
4166 {N->getOperand(1), N->getOperand(2)});
4167 SDNode *N2 = CurDAG->getMachineNode(AArch64::ADDXrr, DL, MVT::i64,
4168 {SDValue(N1, 0), N->getOperand(2)});
4169 SDNode *N3 = CurDAG->getMachineNode(
4170 AArch64::ADDG, DL, MVT::i64,
4171 {SDValue(N2, 0), CurDAG->getTargetConstant(0, DL, MVT::i64),
4172 CurDAG->getTargetConstant(TagOffset, DL, MVT::i64)});
4173 ReplaceNode(N, N3);
4174}
4175
4176bool AArch64DAGToDAGISel::trySelectCastFixedLengthToScalableVector(SDNode *N) {
4177 assert(N->getOpcode() == ISD::INSERT_SUBVECTOR && "Invalid Node!");
4178
4179 // Bail when not a "cast" like insert_subvector.
4180 if (cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() != 0)
4181 return false;
4182 if (!N->getOperand(0).isUndef())
4183 return false;
4184
4185 // Bail when normal isel should do the job.
4186 EVT VT = N->getValueType(0);
4187 EVT InVT = N->getOperand(1).getValueType();
4188 if (VT.isFixedLengthVector() || InVT.isScalableVector())
4189 return false;
4190 if (InVT.getSizeInBits() <= 128)
4191 return false;
4192
4193 // NOTE: We can only get here when doing fixed length SVE code generation.
4194 // We do manual selection because the types involved are not linked to real
4195 // registers (despite being legal) and must be coerced into SVE registers.
4196
4198 "Expected to insert into a packed scalable vector!");
4199
4200 SDLoc DL(N);
4201 auto RC = CurDAG->getTargetConstant(AArch64::ZPRRegClassID, DL, MVT::i64);
4202 ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT,
4203 N->getOperand(1), RC));
4204 return true;
4205}
4206
4207bool AArch64DAGToDAGISel::trySelectCastScalableToFixedLengthVector(SDNode *N) {
4208 assert(N->getOpcode() == ISD::EXTRACT_SUBVECTOR && "Invalid Node!");
4209
4210 // Bail when not a "cast" like extract_subvector.
4211 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 0)
4212 return false;
4213
4214 // Bail when normal isel can do the job.
4215 EVT VT = N->getValueType(0);
4216 EVT InVT = N->getOperand(0).getValueType();
4217 if (VT.isScalableVector() || InVT.isFixedLengthVector())
4218 return false;
4219 if (VT.getSizeInBits() <= 128)
4220 return false;
4221
4222 // NOTE: We can only get here when doing fixed length SVE code generation.
4223 // We do manual selection because the types involved are not linked to real
4224 // registers (despite being legal) and must be coerced into SVE registers.
4225
4227 "Expected to extract from a packed scalable vector!");
4228
4229 SDLoc DL(N);
4230 auto RC = CurDAG->getTargetConstant(AArch64::ZPRRegClassID, DL, MVT::i64);
4231 ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT,
4232 N->getOperand(0), RC));
4233 return true;
4234}
4235
4236void AArch64DAGToDAGISel::Select(SDNode *Node) {
4237 // If the node already has a machine opcode, it has already been selected.
4238 if (Node->isMachineOpcode()) {
4239 LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
4240 Node->setNodeId(-1);
4241 return;
4242 }
4243
4244 // A few cases need custom selection.
4245 EVT VT = Node->getValueType(0);
4246
4247 switch (Node->getOpcode()) {
4248 default:
4249 break;
4250
4251 case ISD::ATOMIC_CMP_SWAP:
4252 if (SelectCMP_SWAP(Node))
4253 return;
4254 break;
4255
4256 case ISD::READ_REGISTER:
4257 case AArch64ISD::MRRS:
4258 if (tryReadRegister(Node))
4259 return;
4260 break;
4261
4262 case ISD::WRITE_REGISTER:
4263 case AArch64ISD::MSRR:
4264 if (tryWriteRegister(Node))
4265 return;
4266 break;
4267
4268 case ISD::LOAD: {
4269 // Try to select as an indexed load. Fall through to normal processing
4270 // if we can't.
4271 if (tryIndexedLoad(Node))
4272 return;
4273 break;
4274 }
4275
4276 case ISD::SRL:
4277 case ISD::AND:
4278 case ISD::SRA:
4279 case ISD::SIGN_EXTEND_INREG:
4280 if (tryBitfieldExtractOp(Node))
4281 return;
4282 if (tryBitfieldInsertInZeroOp(Node))
4283 return;
4284 [[fallthrough]];
4285 case ISD::ROTR:
4286 case ISD::SHL:
4287 if (tryShiftAmountMod(Node))
4288 return;
4289 break;
4290
4291 case ISD::SIGN_EXTEND:
4292 if (tryBitfieldExtractOpFromSExt(Node))
4293 return;
4294 break;
4295
4296 case ISD::OR:
4297 if (tryBitfieldInsertOp(Node))
4298 return;
4299 break;
4300
4301 case ISD::EXTRACT_SUBVECTOR: {
4302 if (trySelectCastScalableToFixedLengthVector(Node))
4303 return;
4304 break;
4305 }
4306
4307 case ISD::INSERT_SUBVECTOR: {
4308 if (trySelectCastFixedLengthToScalableVector(Node))
4309 return;
4310 break;
4311 }
4312
4313 case ISD::Constant: {
4314 // Materialize zero constants as copies from WZR/XZR. This allows
4315 // the coalescer to propagate these into other instructions.
4316 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
4317 if (ConstNode->isZero()) {
4318 if (VT == MVT::i32) {
4319 SDValue New = CurDAG->getCopyFromReg(
4320 CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
4321 ReplaceNode(Node, New.getNode());
4322 return;
4323 } else if (VT == MVT::i64) {
4324 SDValue New = CurDAG->getCopyFromReg(
4325 CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
4326 ReplaceNode(Node, New.getNode());
4327 return;
4328 }
4329 }
4330 break;
4331 }
4332
4333 case ISD::FrameIndex: {
4334 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
4335 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
4336 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
4337 const TargetLowering *TLI = getTargetLowering();
4338 SDValue TFI = CurDAG->getTargetFrameIndex(
4339 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
4340 SDLoc DL(Node);
4341 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
4342 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
4343 CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
4344 return;
4345 }
4346 case ISD::INTRINSIC_W_CHAIN: {
4347 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
4348 switch (IntNo) {
4349 default:
4350 break;
4351 case Intrinsic::aarch64_ldaxp:
4352 case Intrinsic::aarch64_ldxp: {
4353 unsigned Op =
4354 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
4355 SDValue MemAddr = Node->getOperand(2);
4356 SDLoc DL(Node);
4357 SDValue Chain = Node->getOperand(0);
4358
4359 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
4360 MVT::Other, MemAddr, Chain);
4361
4362 // Transfer memoperands.
4363 MachineMemOperand *MemOp =
4364 cast<MemIntrinsicSDNode>(Node)->getMemOperand();
4365 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
4366 ReplaceNode(Node, Ld);
4367 return;
4368 }
4369 case Intrinsic::aarch64_stlxp:
4370 case Intrinsic::aarch64_stxp: {
4371 unsigned Op =
4372 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
4373 SDLoc DL(Node);
4374 SDValue Chain = Node->getOperand(0);
4375 SDValue ValLo = Node->getOperand(2);
4376 SDValue ValHi = Node->getOperand(3);
4377 SDValue MemAddr = Node->getOperand(4);
4378
4379 // Place arguments in the right order.
4380 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
4381
4382 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
4383 // Transfer memoperands.
4384 MachineMemOperand *MemOp =
4385 cast<MemIntrinsicSDNode>(Node)->getMemOperand();
4386 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
4387
4388 ReplaceNode(Node, St);
4389 return;
4390 }
4391 case Intrinsic::aarch64_neon_ld1x2:
4392 if (VT == MVT::v8i8) {
4393 SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
4394 return;
4395 } else if (VT == MVT::v16i8) {
4396 SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
4397 return;
4398 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4399 SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
4400 return;
4401 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4402 SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
4403 return;
4404 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4405 SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
4406 return;
4407 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4408 SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
4409 return;
4410 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4411 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
4412 return;
4413 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4414 SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
4415 return;
4416 }
4417 break;
4418 case Intrinsic::aarch64_neon_ld1x3:
4419 if (VT == MVT::v8i8) {
4420 SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
4421 return;
4422 } else if (VT == MVT::v16i8) {
4423 SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
4424 return;
4425 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4426 SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
4427 return;
4428 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4429 SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
4430 return;
4431 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4432 SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
4433 return;
4434 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4435 SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
4436 return;
4437 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4438 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
4439 return;
4440 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4441 SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
4442 return;
4443 }
4444 break;
4445 case Intrinsic::aarch64_neon_ld1x4:
4446 if (VT == MVT::v8i8) {
4447 SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
4448 return;
4449 } else if (VT == MVT::v16i8) {
4450 SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
4451 return;
4452 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4453 SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
4454 return;
4455 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4456 SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
4457 return;
4458 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4459 SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
4460 return;
4461 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4462 SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
4463 return;
4464 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4465 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
4466 return;
4467 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4468 SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
4469 return;
4470 }
4471 break;
4472 case Intrinsic::aarch64_neon_ld2:
4473 if (VT == MVT::v8i8) {
4474 SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
4475 return;
4476 } else if (VT == MVT::v16i8) {
4477 SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
4478 return;
4479 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4480 SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
4481 return;
4482 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4483 SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
4484 return;
4485 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4486 SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
4487 return;
4488 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4489 SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
4490 return;
4491 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4492 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
4493 return;
4494 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4495 SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
4496 return;
4497 }
4498 break;
4499 case Intrinsic::aarch64_neon_ld3:
4500 if (VT == MVT::v8i8) {
4501 SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
4502 return;
4503 } else if (VT == MVT::v16i8) {
4504 SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
4505 return;
4506 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4507 SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
4508 return;
4509 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4510 SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
4511 return;
4512 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4513 SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
4514 return;
4515 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4516 SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
4517 return;
4518 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4519 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
4520 return;
4521 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4522 SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
4523 return;
4524 }
4525 break;
4526 case Intrinsic::aarch64_neon_ld4:
4527 if (VT == MVT::v8i8) {
4528 SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
4529 return;
4530 } else if (VT == MVT::v16i8) {
4531 SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
4532 return;
4533 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4534 SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
4535 return;
4536 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4537 SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
4538 return;
4539 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4540 SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
4541 return;
4542 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4543 SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
4544 return;
4545 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4546 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
4547 return;
4548 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4549 SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
4550 return;
4551 }
4552 break;
4553 case Intrinsic::aarch64_neon_ld2r:
4554 if (VT == MVT::v8i8) {
4555 SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
4556 return;
4557 } else if (VT == MVT::v16i8) {
4558 SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
4559 return;
4560 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4561 SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
4562 return;
4563 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4564 SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
4565 return;
4566 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4567 SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
4568 return;
4569 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4570 SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
4571 return;
4572 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4573 SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
4574 return;
4575 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4576 SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
4577 return;
4578 }
4579 break;
4580 case Intrinsic::aarch64_neon_ld3r:
4581 if (VT == MVT::v8i8) {
4582 SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
4583 return;
4584 } else if (VT == MVT::v16i8) {
4585 SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
4586 return;
4587 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4588 SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
4589 return;
4590 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4591 SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
4592 return;
4593 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4594 SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
4595 return;
4596 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4597 SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
4598 return;
4599 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4600 SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
4601 return;
4602 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4603 SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
4604 return;
4605 }
4606 break;
4607 case Intrinsic::aarch64_neon_ld4r:
4608 if (VT == MVT::v8i8) {
4609 SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
4610 return;
4611 } else if (VT == MVT::v16i8) {
4612 SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
4613 return;
4614 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4615 SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
4616 return;
4617 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4618 SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
4619 return;
4620 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4621 SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
4622 return;
4623 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4624 SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
4625 return;
4626 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4627 SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
4628 return;
4629 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4630 SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
4631 return;
4632 }
4633 break;
4634 case Intrinsic::aarch64_neon_ld2lane:
4635 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4636 SelectLoadLane(Node, 2, AArch64::LD2i8);
4637 return;
4638 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4639 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4640 SelectLoadLane(Node, 2, AArch64::LD2i16);
4641 return;
4642 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4643 VT == MVT::v2f32) {
4644 SelectLoadLane(Node, 2, AArch64::LD2i32);
4645 return;
4646 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4647 VT == MVT::v1f64) {
4648 SelectLoadLane(Node, 2, AArch64::LD2i64);
4649 return;
4650 }
4651 break;
4652 case Intrinsic::aarch64_neon_ld3lane:
4653 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4654 SelectLoadLane(Node, 3, AArch64::LD3i8);
4655 return;
4656 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4657 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4658 SelectLoadLane(Node, 3, AArch64::LD3i16);
4659 return;
4660 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4661 VT == MVT::v2f32) {
4662 SelectLoadLane(Node, 3, AArch64::LD3i32);
4663 return;
4664 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4665 VT == MVT::v1f64) {
4666 SelectLoadLane(Node, 3, AArch64::LD3i64);
4667 return;
4668 }
4669 break;
4670 case Intrinsic::aarch64_neon_ld4lane:
4671 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4672 SelectLoadLane(Node, 4, AArch64::LD4i8);
4673 return;
4674 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4675 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4676 SelectLoadLane(Node, 4, AArch64::LD4i16);
4677 return;
4678 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4679 VT == MVT::v2f32) {
4680 SelectLoadLane(Node, 4, AArch64::LD4i32);
4681 return;
4682 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4683 VT == MVT::v1f64) {
4684 SelectLoadLane(Node, 4, AArch64::LD4i64);
4685 return;
4686 }
4687 break;
4688 case Intrinsic::aarch64_ld64b:
4689 SelectLoad(Node, 8, AArch64::LD64B, AArch64::x8sub_0);
4690 return;
4691 case Intrinsic::aarch64_sve_ld2_sret: {
4692 if (VT == MVT::nxv16i8) {
4693 SelectPredicatedLoad(Node, 2, 0, AArch64::LD2B_IMM, AArch64::LD2B,
4694 true);
4695 return;
4696 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4697 VT == MVT::nxv8bf16) {
4698 SelectPredicatedLoad(Node, 2, 1, AArch64::LD2H_IMM, AArch64::LD2H,
4699 true);
4700 return;
4701 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4702 SelectPredicatedLoad(Node, 2, 2, AArch64::LD2W_IMM, AArch64::LD2W,
4703 true);
4704 return;
4705 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4706 SelectPredicatedLoad(Node, 2, 3, AArch64::LD2D_IMM, AArch64::LD2D,
4707 true);
4708 return;
4709 }
4710 break;
4711 }
4712 case Intrinsic::aarch64_sve_ld1_pn_x2: {
4713 if (VT == MVT::nxv16i8) {
4714 if (Subtarget->hasSME2())
4715 SelectContiguousMultiVectorLoad(
4716 Node, 2, 0, AArch64::LD1B_2Z_IMM_PSEUDO, AArch64::LD1B_2Z_PSEUDO);
4717 else if (Subtarget->hasSVE2p1())
4718 SelectContiguousMultiVectorLoad(Node, 2, 0, AArch64::LD1B_2Z_IMM,
4719 AArch64::LD1B_2Z);
4720 else
4721 break;
4722 return;
4723 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4724 VT == MVT::nxv8bf16) {
4725 if (Subtarget->hasSME2())
4726 SelectContiguousMultiVectorLoad(
4727 Node, 2, 1, AArch64::LD1H_2Z_IMM_PSEUDO, AArch64::LD1H_2Z_PSEUDO);
4728 else if (Subtarget->hasSVE2p1())
4729 SelectContiguousMultiVectorLoad(Node, 2, 1, AArch64::LD1H_2Z_IMM,
4730 AArch64::LD1H_2Z);
4731 else
4732 break;
4733 return;
4734 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4735 if (Subtarget->hasSME2())
4736 SelectContiguousMultiVectorLoad(
4737 Node, 2, 2, AArch64::LD1W_2Z_IMM_PSEUDO, AArch64::LD1W_2Z_PSEUDO);
4738 else if (Subtarget->hasSVE2p1())
4739 SelectContiguousMultiVectorLoad(Node, 2, 2, AArch64::LD1W_2Z_IMM,
4740 AArch64::LD1W_2Z);
4741 else
4742 break;
4743 return;
4744 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4745 if (Subtarget->hasSME2())
4746 SelectContiguousMultiVectorLoad(
4747 Node, 2, 3, AArch64::LD1D_2Z_IMM_PSEUDO, AArch64::LD1D_2Z_PSEUDO);
4748 else if (Subtarget->hasSVE2p1())
4749 SelectContiguousMultiVectorLoad(Node, 2, 3, AArch64::LD1D_2Z_IMM,
4750 AArch64::LD1D_2Z);
4751 else
4752 break;
4753 return;
4754 }
4755 break;
4756 }
4757 case Intrinsic::aarch64_sve_ld1_pn_x4: {
4758 if (VT == MVT::nxv16i8) {
4759 if (Subtarget->hasSME2())
4760 SelectContiguousMultiVectorLoad(
4761 Node, 4, 0, AArch64::LD1B_4Z_IMM_PSEUDO, AArch64::LD1B_4Z_PSEUDO);
4762 else if (Subtarget->hasSVE2p1())
4763 SelectContiguousMultiVectorLoad(Node, 4, 0, AArch64::LD1B_4Z_IMM,
4764 AArch64::LD1B_4Z);
4765 else
4766 break;
4767 return;
4768 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4769 VT == MVT::nxv8bf16) {
4770 if (Subtarget->hasSME2())
4771 SelectContiguousMultiVectorLoad(
4772 Node, 4, 1, AArch64::LD1H_4Z_IMM_PSEUDO, AArch64::LD1H_4Z_PSEUDO);
4773 else if (Subtarget->hasSVE2p1())
4774 SelectContiguousMultiVectorLoad(Node, 4, 1, AArch64::LD1H_4Z_IMM,
4775 AArch64::LD1H_4Z);
4776 else
4777 break;
4778 return;
4779 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4780 if (Subtarget->hasSME2())
4781 SelectContiguousMultiVectorLoad(
4782 Node, 4, 2, AArch64::LD1W_4Z_IMM_PSEUDO, AArch64::LD1W_4Z_PSEUDO);
4783 else if (Subtarget->hasSVE2p1())
4784 SelectContiguousMultiVectorLoad(Node, 4, 2, AArch64::LD1W_4Z_IMM,
4785 AArch64::LD1W_4Z);
4786 else
4787 break;
4788 return;
4789 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4790 if (Subtarget->hasSME2())
4791 SelectContiguousMultiVectorLoad(
4792 Node, 4, 3, AArch64::LD1D_4Z_IMM_PSEUDO, AArch64::LD1D_4Z_PSEUDO);
4793 else if (Subtarget->hasSVE2p1())
4794 SelectContiguousMultiVectorLoad(Node, 4, 3, AArch64::LD1D_4Z_IMM,
4795 AArch64::LD1D_4Z);
4796 else
4797 break;
4798 return;
4799 }
4800 break;
4801 }
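// aarch64_sve_ldnt1_pn_x2 is the non-temporal counterpart of ld1_pn_x2: the
// feature dispatch (SME2 pseudos vs. SVE2p1 instructions) is identical, but
// the LDNT1*_2Z opcodes are used so the selected loads carry the
// non-temporal hint.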
4802 case Intrinsic::aarch64_sve_ldnt1_pn_x2: {
4803 if (VT == MVT::nxv16i8) {
4804 if (Subtarget->hasSME2())
4805 SelectContiguousMultiVectorLoad(Node, 2, 0,
4806 AArch64::LDNT1B_2Z_IMM_PSEUDO,
4807 AArch64::LDNT1B_2Z_PSEUDO);
4808 else if (Subtarget->hasSVE2p1())
4809 SelectContiguousMultiVectorLoad(Node, 2, 0, AArch64::LDNT1B_2Z_IMM,
4810 AArch64::LDNT1B_2Z);
4811 else
4812 break;
4813 return;
4814 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4815 VT == MVT::nxv8bf16) {
4816 if (Subtarget->hasSME2())
4817 SelectContiguousMultiVectorLoad(Node, 2, 1,
4818 AArch64::LDNT1H_2Z_IMM_PSEUDO,
4819 AArch64::LDNT1H_2Z_PSEUDO);
4820 else if (Subtarget->hasSVE2p1())
4821 SelectContiguousMultiVectorLoad(Node, 2, 1, AArch64::LDNT1H_2Z_IMM,
4822 AArch64::LDNT1H_2Z);
4823 else
4824 break;
4825 return;
4826 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4827 if (Subtarget->hasSME2())
4828 SelectContiguousMultiVectorLoad(Node, 2, 2,
4829 AArch64::LDNT1W_2Z_IMM_PSEUDO,
4830 AArch64::LDNT1W_2Z_PSEUDO);
4831 else if (Subtarget->hasSVE2p1())
4832 SelectContiguousMultiVectorLoad(Node, 2, 2, AArch64::LDNT1W_2Z_IMM,
4833 AArch64::LDNT1W_2Z);
4834 else
4835 break;
4836 return;
4837 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4838 if (Subtarget->hasSME2())
4839 SelectContiguousMultiVectorLoad(Node, 2, 3,
4840 AArch64::LDNT1D_2Z_IMM_PSEUDO,
4841 AArch64::LDNT1D_2Z_PSEUDO);
4842 else if (Subtarget->hasSVE2p1())
4843 SelectContiguousMultiVectorLoad(Node, 2, 3, AArch64::LDNT1D_2Z_IMM,
4844 AArch64::LDNT1D_2Z);
4845 else
4846 break;
4847 return;
4848 }
4849 break;
4850 }
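// aarch64_sve_ldnt1_pn_x4: as above, the non-temporal four-vector form,
// selected to LDNT1*_4Z (or the *_PSEUDO variants under SME2).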
4851 case Intrinsic::aarch64_sve_ldnt1_pn_x4: {
4852 if (VT == MVT::nxv16i8) {
4853 if (Subtarget->hasSME2())
4854 SelectContiguousMultiVectorLoad(Node, 4, 0,
4855 AArch64::LDNT1B_4Z_IMM_PSEUDO,
4856 AArch64::LDNT1B_4Z_PSEUDO);
4857 else if (Subtarget->hasSVE2p1())
4858 SelectContiguousMultiVectorLoad(Node, 4, 0, AArch64::LDNT1B_4Z_IMM,
4859 AArch64::LDNT1B_4Z);
4860 else
4861 break;
4862 return;
4863 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4864 VT == MVT::nxv8bf16) {
4865 if (Subtarget->hasSME2())
4866 SelectContiguousMultiVectorLoad(Node, 4, 1,
4867 AArch64::LDNT1H_4Z_IMM_PSEUDO,
4868 AArch64::LDNT1H_4Z_PSEUDO);
4869 else if (Subtarget->hasSVE2p1())
4870 SelectContiguousMultiVectorLoad(Node, 4, 1, AArch64::LDNT1H_4Z_IMM,
4871 AArch64::LDNT1H_4Z);
4872 else
4873 break;
4874 return;
4875 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4876 if (Subtarget->hasSME2())
4877 SelectContiguousMultiVectorLoad(Node, 4, 2,
4878 AArch64::LDNT1W_4Z_IMM_PSEUDO,
4879 AArch64::LDNT1W_4Z_PSEUDO);
4880 else if (Subtarget->hasSVE2p1())
4881 SelectContiguousMultiVectorLoad(Node, 4, 2, AArch64::LDNT1W_4Z_IMM,
4882 AArch64::LDNT1W_4Z);
4883 else
4884 break;
4885 return;
4886 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4887 if (Subtarget->hasSME2())
4888 SelectContiguousMultiVectorLoad(Node, 4, 3,
4889 AArch64::LDNT1D_4Z_IMM_PSEUDO,
4890 AArch64::LDNT1D_4Z_PSEUDO);
4891 else if (Subtarget->hasSVE2p1())
4892 SelectContiguousMultiVectorLoad(Node, 4, 3, AArch64::LDNT1D_4Z_IMM,
4893 AArch64::LDNT1D_4Z);
4894 else
4895 break;
4896 return;
4897 }
4898 break;
4899 }
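// aarch64_sve_ld3_sret and aarch64_sve_ld4_sret (below) are the structured
// SVE loads whose results are returned as a struct of three or four scalable
// vectors. There is no SME2/SVE2p1 dispatch here; SelectPredicatedLoad picks
// between the immediate-offset (*_IMM) and register-offset opcodes based on
// how the address operand is formed, and the trailing 'true' appears to flag
// that Node is an intrinsic call rather than a plain ISD node.
//
// A hypothetical IR sketch for the nxv16i8 branch of ld3_sret (the names %pg
// and %base are for exposition only):
//
//   %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }
//       @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %pg, ptr %base)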
4900 case Intrinsic::aarch64_sve_ld3_sret: {
4901 if (VT == MVT::nxv16i8) {
4902 SelectPredicatedLoad(Node, 3, 0, AArch64::LD3B_IMM, AArch64::LD3B,
4903 true);
4904 return;
4905 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4906 VT == MVT::nxv8bf16) {
4907 SelectPredicatedLoad(Node, 3, 1, AArch64::LD3H_IMM, AArch64::LD3H,
4908 true);
4909 return;
4910 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4911 SelectPredicatedLoad(Node, 3, 2, AArch64::LD3W_IMM, AArch64::LD3W,
4912 true);
4913 return;
4914 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4915 SelectPredicatedLoad(Node, 3, 3, AArch64::LD3D_IMM, AArch64::LD3D,
4916 true);
4917 return;
4918 }
4919 break;
4920 }
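// aarch64_sve_ld4_sret follows the same pattern as ld3_sret, with four result
// vectors and the LD4*_IMM/LD4* opcodes.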
4921 case Intrinsic::aarch64_sve_ld4_sret: {
4922 if (VT == MVT::nxv16i8) {
4923 SelectPredicatedLoad(Node, 4, 0, AArch64::LD4B_IMM, AArch64::LD4B,
4924 true);
4925 return;
4926 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4927 VT == MVT::nxv8bf16) {
4928 SelectPredicatedLoad(Node, 4, 1, AArch64::LD4H_IMM, AArch64::LD4H,
4929 true);
4930 return;
4931 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4932 SelectPredicatedLoad(Node, 4, 2, AArch64::LD4W_IMM, AArch64::LD4W,
4933 true);
4934 return;
4935 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4936 SelectPredicatedLoad(Node, 4, 3, AArch64::LD4D_IMM, AArch64::LD4D,
4937 true);
4938 return;
4939 }
4940 break;