LLVM 18.0.0git
ARMISelDAGToDAG.cpp
Go to the documentation of this file.
1//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines an instruction selector for the ARM target.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ARM.h"
14#include "ARMBaseInstrInfo.h"
15#include "ARMTargetMachine.h"
17#include "Utils/ARMBaseInfo.h"
18#include "llvm/ADT/APSInt.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Constants.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Intrinsics.h"
32#include "llvm/IR/IntrinsicsARM.h"
33#include "llvm/IR/LLVMContext.h"
35#include "llvm/Support/Debug.h"
38#include <optional>
39
40using namespace llvm;
41
42#define DEBUG_TYPE "arm-isel"
43#define PASS_NAME "ARM Instruction Selection"
44
// Debugging escape hatch: when set, none of the shifter-operand complex
// patterns below will match, so shifts are selected as separate instructions
// instead of being folded into the shifter operand of another instruction.
static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
                 cl::desc("Disable isel of shifter-op"),
                 cl::init(false));
49
50//===--------------------------------------------------------------------===//
51/// ARMDAGToDAGISel - ARM specific code to select ARM machine
52/// instructions for SelectionDAG operations.
53///
54namespace {
55
56class ARMDAGToDAGISel : public SelectionDAGISel {
57 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
58 /// make the right decision when generating code for different targets.
59 const ARMSubtarget *Subtarget;
60
61public:
62 static char ID;
63
64 ARMDAGToDAGISel() = delete;
65
66 explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm, CodeGenOptLevel OptLevel)
67 : SelectionDAGISel(ID, tm, OptLevel) {}
68
69 bool runOnMachineFunction(MachineFunction &MF) override {
70 // Reset the subtarget each time through.
71 Subtarget = &MF.getSubtarget<ARMSubtarget>();
73 return true;
74 }
75
76 void PreprocessISelDAG() override;
77
78 /// getI32Imm - Return a target constant of type i32 with the specified
79 /// value.
80 inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
81 return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
82 }
83
84 void Select(SDNode *N) override;
85
86 /// Return true as some complex patterns, like those that call
87 /// canExtractShiftFromMul can modify the DAG inplace.
88 bool ComplexPatternFuncMutatesDAG() const override { return true; }
89
90 bool hasNoVMLxHazardUse(SDNode *N) const;
91 bool isShifterOpProfitable(const SDValue &Shift,
92 ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
93 bool SelectRegShifterOperand(SDValue N, SDValue &A,
94 SDValue &B, SDValue &C,
95 bool CheckProfitability = true);
96 bool SelectImmShifterOperand(SDValue N, SDValue &A,
97 SDValue &B, bool CheckProfitability = true);
98 bool SelectShiftRegShifterOperand(SDValue N, SDValue &A, SDValue &B,
99 SDValue &C) {
100 // Don't apply the profitability check
101 return SelectRegShifterOperand(N, A, B, C, false);
102 }
103 bool SelectShiftImmShifterOperand(SDValue N, SDValue &A, SDValue &B) {
104 // Don't apply the profitability check
105 return SelectImmShifterOperand(N, A, B, false);
106 }
107 bool SelectShiftImmShifterOperandOneUse(SDValue N, SDValue &A, SDValue &B) {
108 if (!N.hasOneUse())
109 return false;
110 return SelectImmShifterOperand(N, A, B, false);
111 }
112
113 bool SelectAddLikeOr(SDNode *Parent, SDValue N, SDValue &Out);
114
115 bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
116 bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);
117
118 bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
119 const ConstantSDNode *CN = cast<ConstantSDNode>(N);
120 Pred = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(N), MVT::i32);
121 Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
122 return true;
123 }
124
125 bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
126 SDValue &Offset, SDValue &Opc);
127 bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
128 SDValue &Offset, SDValue &Opc);
129 bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
130 SDValue &Offset, SDValue &Opc);
131 bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
132 bool SelectAddrMode3(SDValue N, SDValue &Base,
133 SDValue &Offset, SDValue &Opc);
134 bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
135 SDValue &Offset, SDValue &Opc);
136 bool IsAddressingMode5(SDValue N, SDValue &Base, SDValue &Offset, bool FP16);
137 bool SelectAddrMode5(SDValue N, SDValue &Base, SDValue &Offset);
138 bool SelectAddrMode5FP16(SDValue N, SDValue &Base, SDValue &Offset);
139 bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
140 bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);
141
142 bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);
143
144 // Thumb Addressing Modes:
145 bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
146 bool SelectThumbAddrModeRRSext(SDValue N, SDValue &Base, SDValue &Offset);
147 bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
148 SDValue &OffImm);
149 bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
150 SDValue &OffImm);
151 bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
152 SDValue &OffImm);
153 bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
154 SDValue &OffImm);
155 bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
156 template <unsigned Shift>
157 bool SelectTAddrModeImm7(SDValue N, SDValue &Base, SDValue &OffImm);
158
159 // Thumb 2 Addressing Modes:
160 bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
161 template <unsigned Shift>
162 bool SelectT2AddrModeImm8(SDValue N, SDValue &Base, SDValue &OffImm);
163 bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
164 SDValue &OffImm);
165 bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
166 SDValue &OffImm);
167 template <unsigned Shift>
168 bool SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N, SDValue &OffImm);
169 bool SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N, SDValue &OffImm,
170 unsigned Shift);
171 template <unsigned Shift>
172 bool SelectT2AddrModeImm7(SDValue N, SDValue &Base, SDValue &OffImm);
173 bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
174 SDValue &OffReg, SDValue &ShImm);
175 bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);
176
177 template<int Min, int Max>
178 bool SelectImmediateInRange(SDValue N, SDValue &OffImm);
179
180 inline bool is_so_imm(unsigned Imm) const {
181 return ARM_AM::getSOImmVal(Imm) != -1;
182 }
183
184 inline bool is_so_imm_not(unsigned Imm) const {
185 return ARM_AM::getSOImmVal(~Imm) != -1;
186 }
187
188 inline bool is_t2_so_imm(unsigned Imm) const {
189 return ARM_AM::getT2SOImmVal(Imm) != -1;
190 }
191
192 inline bool is_t2_so_imm_not(unsigned Imm) const {
193 return ARM_AM::getT2SOImmVal(~Imm) != -1;
194 }
195
196 // Include the pieces autogenerated from the target description.
197#include "ARMGenDAGISel.inc"
198
199private:
200 void transferMemOperands(SDNode *Src, SDNode *Dst);
201
202 /// Indexed (pre/post inc/dec) load matching code for ARM.
203 bool tryARMIndexedLoad(SDNode *N);
204 bool tryT1IndexedLoad(SDNode *N);
205 bool tryT2IndexedLoad(SDNode *N);
206 bool tryMVEIndexedLoad(SDNode *N);
207 bool tryFMULFixed(SDNode *N, SDLoc dl);
208 bool tryFP_TO_INT(SDNode *N, SDLoc dl);
209 bool transformFixedFloatingPointConversion(SDNode *N, SDNode *FMul,
210 bool IsUnsigned,
211 bool FixedToFloat);
212
213 /// SelectVLD - Select NEON load intrinsics. NumVecs should be
214 /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
215 /// loads of D registers and even subregs and odd subregs of Q registers.
216 /// For NumVecs <= 2, QOpcodes1 is not used.
217 void SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
218 const uint16_t *DOpcodes, const uint16_t *QOpcodes0,
219 const uint16_t *QOpcodes1);
220
221 /// SelectVST - Select NEON store intrinsics. NumVecs should
222 /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
223 /// stores of D registers and even subregs and odd subregs of Q registers.
224 /// For NumVecs <= 2, QOpcodes1 is not used.
225 void SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
226 const uint16_t *DOpcodes, const uint16_t *QOpcodes0,
227 const uint16_t *QOpcodes1);
228
229 /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
230 /// be 2, 3 or 4. The opcode arrays specify the instructions used for
231 /// load/store of D registers and Q registers.
232 void SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
233 unsigned NumVecs, const uint16_t *DOpcodes,
234 const uint16_t *QOpcodes);
235
236 /// Helper functions for setting up clusters of MVE predication operands.
237 template <typename SDValueVector>
238 void AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
239 SDValue PredicateMask);
240 template <typename SDValueVector>
241 void AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
242 SDValue PredicateMask, SDValue Inactive);
243
244 template <typename SDValueVector>
245 void AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc);
246 template <typename SDValueVector>
247 void AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc, EVT InactiveTy);
248
249 /// SelectMVE_WB - Select MVE writeback load/store intrinsics.
250 void SelectMVE_WB(SDNode *N, const uint16_t *Opcodes, bool Predicated);
251
252 /// SelectMVE_LongShift - Select MVE 64-bit scalar shift intrinsics.
253 void SelectMVE_LongShift(SDNode *N, uint16_t Opcode, bool Immediate,
254 bool HasSaturationOperand);
255
256 /// SelectMVE_VADCSBC - Select MVE vector add/sub-with-carry intrinsics.
257 void SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
258 uint16_t OpcodeWithNoCarry, bool Add, bool Predicated);
259
260 /// SelectMVE_VSHLC - Select MVE intrinsics for a shift that carries between
261 /// vector lanes.
262 void SelectMVE_VSHLC(SDNode *N, bool Predicated);
263
264 /// Select long MVE vector reductions with two vector operands
265 /// Stride is the number of vector element widths the instruction can operate
266 /// on:
267 /// 2 for long non-rounding variants, vml{a,s}ldav[a][x]: [i16, i32]
268 /// 1 for long rounding variants: vrml{a,s}ldavh[a][x]: [i32]
269 /// Stride is used when addressing the OpcodesS array which contains multiple
270 /// opcodes for each element width.
271 /// TySize is the index into the list of element types listed above
272 void SelectBaseMVE_VMLLDAV(SDNode *N, bool Predicated,
273 const uint16_t *OpcodesS, const uint16_t *OpcodesU,
274 size_t Stride, size_t TySize);
275
276 /// Select a 64-bit MVE vector reduction with two vector operands
277 /// arm_mve_vmlldava_[predicated]
278 void SelectMVE_VMLLDAV(SDNode *N, bool Predicated, const uint16_t *OpcodesS,
279 const uint16_t *OpcodesU);
280 /// Select a 72-bit MVE vector rounding reduction with two vector operands
281 /// int_arm_mve_vrmlldavha[_predicated]
282 void SelectMVE_VRMLLDAVH(SDNode *N, bool Predicated, const uint16_t *OpcodesS,
283 const uint16_t *OpcodesU);
284
285 /// SelectMVE_VLD - Select MVE interleaving load intrinsics. NumVecs
286 /// should be 2 or 4. The opcode array specifies the instructions
287 /// used for 8, 16 and 32-bit lane sizes respectively, and each
288 /// pointer points to a set of NumVecs sub-opcodes used for the
289 /// different stages (e.g. VLD20 versus VLD21) of each load family.
290 void SelectMVE_VLD(SDNode *N, unsigned NumVecs,
291 const uint16_t *const *Opcodes, bool HasWriteback);
292
293 /// SelectMVE_VxDUP - Select MVE incrementing-dup instructions. Opcodes is an
294 /// array of 3 elements for the 8, 16 and 32-bit lane sizes.
295 void SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
296 bool Wrapping, bool Predicated);
297
298 /// Select SelectCDE_CXxD - Select CDE dual-GPR instruction (one of CX1D,
299 /// CX1DA, CX2D, CX2DA, CX3, CX3DA).
300 /// \arg \c NumExtraOps number of extra operands besides the coprocossor,
301 /// the accumulator and the immediate operand, i.e. 0
302 /// for CX1*, 1 for CX2*, 2 for CX3*
303 /// \arg \c HasAccum whether the instruction has an accumulator operand
304 void SelectCDE_CXxD(SDNode *N, uint16_t Opcode, size_t NumExtraOps,
305 bool HasAccum);
306
307 /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
308 /// should be 1, 2, 3 or 4. The opcode array specifies the instructions used
309 /// for loading D registers.
310 void SelectVLDDup(SDNode *N, bool IsIntrinsic, bool isUpdating,
311 unsigned NumVecs, const uint16_t *DOpcodes,
312 const uint16_t *QOpcodes0 = nullptr,
313 const uint16_t *QOpcodes1 = nullptr);
314
315 /// Try to select SBFX/UBFX instructions for ARM.
316 bool tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
317
318 bool tryInsertVectorElt(SDNode *N);
319
320 // Select special operations if node forms integer ABS pattern
321 bool tryABSOp(SDNode *N);
322
323 bool tryReadRegister(SDNode *N);
324 bool tryWriteRegister(SDNode *N);
325
326 bool tryInlineAsm(SDNode *N);
327
328 void SelectCMPZ(SDNode *N, bool &SwitchEQNEToPLMI);
329
330 void SelectCMP_SWAP(SDNode *N);
331
332 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
333 /// inline asm expressions.
335 InlineAsm::ConstraintCode ConstraintID,
336 std::vector<SDValue> &OutOps) override;
337
338 // Form pairs of consecutive R, S, D, or Q registers.
340 SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
341 SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
342 SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);
343
344 // Form sequences of 4 consecutive S, D, or Q registers.
345 SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
346 SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
347 SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
348
349 // Get the alignment operand for a NEON VLD or VST instruction.
350 SDValue GetVLDSTAlign(SDValue Align, const SDLoc &dl, unsigned NumVecs,
351 bool is64BitVector);
352
353 /// Checks if N is a multiplication by a constant where we can extract out a
354 /// power of two from the constant so that it can be used in a shift, but only
355 /// if it simplifies the materialization of the constant. Returns true if it
356 /// is, and assigns to PowerOfTwo the power of two that should be extracted
357 /// out and to NewMulConst the new constant to be multiplied by.
358 bool canExtractShiftFromMul(const SDValue &N, unsigned MaxShift,
359 unsigned &PowerOfTwo, SDValue &NewMulConst) const;
360
361 /// Replace N with M in CurDAG, in a way that also ensures that M gets
362 /// selected when N would have been selected.
363 void replaceDAGValue(const SDValue &N, SDValue M);
364};
365}
366
// Pass identification: the address of ID serves as the unique identifier the
// pass manager uses for this pass.
char ARMDAGToDAGISel::ID = 0;

INITIALIZE_PASS(ARMDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)
370
371/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
372/// operand. If so Imm will receive the 32-bit value.
373static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
374 if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
375 Imm = cast<ConstantSDNode>(N)->getZExtValue();
376 return true;
377 }
378 return false;
379}
380
381// isInt32Immediate - This method tests to see if a constant operand.
382// If so Imm will receive the 32 bit value.
383static bool isInt32Immediate(SDValue N, unsigned &Imm) {
384 return isInt32Immediate(N.getNode(), Imm);
385}
386
387// isOpcWithIntImmediate - This method tests to see if the node is a specific
388// opcode and that it has a immediate integer right operand.
389// If so Imm will receive the 32 bit value.
390static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
391 return N->getOpcode() == Opc &&
392 isInt32Immediate(N->getOperand(1).getNode(), Imm);
393}
394
395/// Check whether a particular node is a constant value representable as
396/// (N * Scale) where (N in [\p RangeMin, \p RangeMax).
397///
398/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
399static bool isScaledConstantInRange(SDValue Node, int Scale,
400 int RangeMin, int RangeMax,
401 int &ScaledConstant) {
402 assert(Scale > 0 && "Invalid scale!");
403
404 // Check that this is a constant.
405 const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
406 if (!C)
407 return false;
408
409 ScaledConstant = (int) C->getZExtValue();
410 if ((ScaledConstant % Scale) != 0)
411 return false;
412
413 ScaledConstant /= Scale;
414 return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
415}
416
/// PreprocessISelDAG - Rewrite (add X1, (and (srl X2, c1), c2)) adds so that
/// the and/srl pair can later match a bitfield extract (UBFX) and the left
/// shift can fold into the add's shifter operand. Requires v6T2 (UBFX).
void ARMDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->hasV6T2Ops())
    return;

  // NOTE(review): isThumb() is also true for Thumb1, but the hasV6T2Ops()
  // check above means any Thumb subtarget reaching here is Thumb2 — confirm.
  bool isThumb2 = Subtarget->isThumb();
  // We use make_early_inc_range to avoid invalidation issues.
  for (SDNode &N : llvm::make_early_inc_range(CurDAG->allnodes())) {
    if (N.getOpcode() != ISD::ADD)
      continue;

    // Look for (add X1, (and (srl X2, c1), c2)) where c2 is constant with
    // leading zeros, followed by consecutive set bits, followed by 1 or 2
    // trailing zeros, e.g. 1020.
    // Transform the expression to
    // (add X1, (shl (and (srl X2, c1), (c2>>tz)), tz)) where tz is the number
    // of trailing zeros of c2. The left shift would be folded as an shifter
    // operand of 'add' and the 'and' and 'srl' would become a bits extraction
    // node (UBFX).

    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    unsigned And_imm = 0;
    // Canonicalize so that N1 is the (and ... c2) operand.
    if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
      if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
        std::swap(N0, N1);
    }
    if (!And_imm)
      continue;

    // Check if the AND mask is an immediate of the form: 000.....1111111100
    unsigned TZ = llvm::countr_zero(And_imm);
    if (TZ != 1 && TZ != 2)
      // Be conservative here. Shifter operands aren't always free. e.g. On
      // Swift, left shifter operand of 1 / 2 for free but others are not.
      // e.g.
      //  ubfx   r3, r1, #16, #8
      //  ldr.w  r3, [r0, r3, lsl #2]
      // vs.
      //  mov.w  r9, #1020
      //  and.w  r2, r9, r1, lsr #14
      //  ldr    r2, [r0, r2]
      continue;
    And_imm >>= TZ;
    // After shifting out the trailing zeros the mask must be all-ones
    // (x & (x+1) == 0 iff x is of the form 0...01...1).
    if (And_imm & (And_imm + 1))
      continue;

    // Look for (and (srl X, c1), c2).
    SDValue Srl = N1.getOperand(0);
    unsigned Srl_imm = 0;
    if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
        (Srl_imm <= 2))
      continue;

    // Make sure first operand is not a shifter operand which would prevent
    // folding of the left shift.
    SDValue CPTmp0;
    SDValue CPTmp1;
    SDValue CPTmp2;
    if (isThumb2) {
      if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1))
        continue;
    } else {
      if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
          SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
        continue;
    }

    // Now make the transformation.
    Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
                          Srl.getOperand(0),
                          CurDAG->getConstant(Srl_imm + TZ, SDLoc(Srl),
                                              MVT::i32));
    N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
                         Srl,
                         CurDAG->getConstant(And_imm, SDLoc(Srl), MVT::i32));
    N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
                         N1, CurDAG->getConstant(TZ, SDLoc(Srl), MVT::i32));
    CurDAG->UpdateNodeOperands(&N, N0, N1);
  }
}
497
/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  // At -O0 don't bother avoiding the hazard; just allow the MLA/MLS.
  if (OptLevel == CodeGenOptLevel::None)
    return true;

  if (!Subtarget->hasVMLxHazards())
    return true;

  // With more than one user we conservatively assume a hazardous consumer.
  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const ARMBaseInstrInfo *TII = static_cast<const ARMBaseInstrInfo *>(
        CurDAG->getSubtarget().getInstrInfo());

    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla
    // vmla (stall 8 cycles)
    //
    // vmul (5 cycles)
    // vadd (5 cycles)
    // vmla
    // This adds up to about 18 - 19 cycles.
    //
    // vmla
    // vmul (stall 4 cycles)
    // vadd adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}
542
543bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
544 ARM_AM::ShiftOpc ShOpcVal,
545 unsigned ShAmt) {
546 if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
547 return true;
548 if (Shift.hasOneUse())
549 return true;
550 // R << 2 is free.
551 return ShOpcVal == ARM_AM::lsl &&
552 (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
553}
554
555bool ARMDAGToDAGISel::canExtractShiftFromMul(const SDValue &N,
556 unsigned MaxShift,
557 unsigned &PowerOfTwo,
558 SDValue &NewMulConst) const {
559 assert(N.getOpcode() == ISD::MUL);
560 assert(MaxShift > 0);
561
562 // If the multiply is used in more than one place then changing the constant
563 // will make other uses incorrect, so don't.
564 if (!N.hasOneUse()) return false;
565 // Check if the multiply is by a constant
566 ConstantSDNode *MulConst = dyn_cast<ConstantSDNode>(N.getOperand(1));
567 if (!MulConst) return false;
568 // If the constant is used in more than one place then modifying it will mean
569 // we need to materialize two constants instead of one, which is a bad idea.
570 if (!MulConst->hasOneUse()) return false;
571 unsigned MulConstVal = MulConst->getZExtValue();
572 if (MulConstVal == 0) return false;
573
574 // Find the largest power of 2 that MulConstVal is a multiple of
575 PowerOfTwo = MaxShift;
576 while ((MulConstVal % (1 << PowerOfTwo)) != 0) {
577 --PowerOfTwo;
578 if (PowerOfTwo == 0) return false;
579 }
580
581 // Only optimise if the new cost is better
582 unsigned NewMulConstVal = MulConstVal / (1 << PowerOfTwo);
583 NewMulConst = CurDAG->getConstant(NewMulConstVal, SDLoc(N), MVT::i32);
584 unsigned OldCost = ConstantMaterializationCost(MulConstVal, Subtarget);
585 unsigned NewCost = ConstantMaterializationCost(NewMulConstVal, Subtarget);
586 return NewCost < OldCost;
587}
588
void ARMDAGToDAGISel::replaceDAGValue(const SDValue &N, SDValue M) {
  // Move M to N's position in the topological order so that M is still
  // visited (and selected) where N would have been, then redirect all uses.
  CurDAG->RepositionNode(N.getNode()->getIterator(), M.getNode());
  ReplaceUses(N, M);
}
593
594bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
595 SDValue &BaseReg,
596 SDValue &Opc,
597 bool CheckProfitability) {
599 return false;
600
601 // If N is a multiply-by-constant and it's profitable to extract a shift and
602 // use it in a shifted operand do so.
603 if (N.getOpcode() == ISD::MUL) {
604 unsigned PowerOfTwo = 0;
605 SDValue NewMulConst;
606 if (canExtractShiftFromMul(N, 31, PowerOfTwo, NewMulConst)) {
607 HandleSDNode Handle(N);
608 SDLoc Loc(N);
609 replaceDAGValue(N.getOperand(1), NewMulConst);
610 BaseReg = Handle.getValue();
611 Opc = CurDAG->getTargetConstant(
612 ARM_AM::getSORegOpc(ARM_AM::lsl, PowerOfTwo), Loc, MVT::i32);
613 return true;
614 }
615 }
616
617 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
618
619 // Don't match base register only case. That is matched to a separate
620 // lower complexity pattern with explicit register operand.
621 if (ShOpcVal == ARM_AM::no_shift) return false;
622
623 BaseReg = N.getOperand(0);
624 unsigned ShImmVal = 0;
625 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
626 if (!RHS) return false;
627 ShImmVal = RHS->getZExtValue() & 31;
628 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
629 SDLoc(N), MVT::i32);
630 return true;
631}
632
633bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
634 SDValue &BaseReg,
635 SDValue &ShReg,
636 SDValue &Opc,
637 bool CheckProfitability) {
639 return false;
640
641 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
642
643 // Don't match base register only case. That is matched to a separate
644 // lower complexity pattern with explicit register operand.
645 if (ShOpcVal == ARM_AM::no_shift) return false;
646
647 BaseReg = N.getOperand(0);
648 unsigned ShImmVal = 0;
649 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
650 if (RHS) return false;
651
652 ShReg = N.getOperand(1);
653 if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
654 return false;
655 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
656 SDLoc(N), MVT::i32);
657 return true;
658}
659
// Determine whether an ISD::OR's operands are suitable to turn the operation
// into an addition, which often has more compact encodings.
bool ARMDAGToDAGISel::SelectAddLikeOr(SDNode *Parent, SDValue N, SDValue &Out) {
  assert(Parent->getOpcode() == ISD::OR && "unexpected parent");
  Out = N;
  // An OR is equivalent to an ADD exactly when the operands share no set bits.
  return CurDAG->haveNoCommonBitsSet(N, Parent->getOperand(1));
}
667
668
/// SelectAddrModeImm12 - Match an address of the form R +/- imm12, producing
/// a base register and a signed 12-bit immediate offset. Always succeeds,
/// falling back to base-with-zero-offset when no offset can be folded.
bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
      return true;
    }

    // Strip an ARMISD::Wrapper, but not around symbols that need special
    // materialization (globals / external symbols / TLS addresses).
    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
        N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    // Canonicalize SUB to an ADD of the negated offset.
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC > -0x1000 && RHSC < 0x1000) { // 12 bits
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
719
720
721
722bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
723 SDValue &Opc) {
724 if (N.getOpcode() == ISD::MUL &&
725 ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
726 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
727 // X * [3,5,9] -> X + X * [2,4,8] etc.
728 int RHSC = (int)RHS->getZExtValue();
729 if (RHSC & 1) {
730 RHSC = RHSC & ~1;
732 if (RHSC < 0) {
734 RHSC = - RHSC;
735 }
736 if (isPowerOf2_32(RHSC)) {
737 unsigned ShAmt = Log2_32(RHSC);
738 Base = Offset = N.getOperand(0);
739 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
741 SDLoc(N), MVT::i32);
742 return true;
743 }
744 }
745 }
746 }
747
748 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
749 // ISD::OR that is equivalent to an ISD::ADD.
750 !CurDAG->isBaseWithConstantOffset(N))
751 return false;
752
753 // Leave simple R +/- imm12 operands for LDRi12
754 if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
755 int RHSC;
756 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
757 -0x1000+1, 0x1000, RHSC)) // 12 bits.
758 return false;
759 }
760
761 // Otherwise this is R +/- [possibly shifted] R.
763 ARM_AM::ShiftOpc ShOpcVal =
764 ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
765 unsigned ShAmt = 0;
766
767 Base = N.getOperand(0);
768 Offset = N.getOperand(1);
769
770 if (ShOpcVal != ARM_AM::no_shift) {
771 // Check to see if the RHS of the shift is a constant, if not, we can't fold
772 // it.
773 if (ConstantSDNode *Sh =
774 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
775 ShAmt = Sh->getZExtValue();
776 if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
777 Offset = N.getOperand(1).getOperand(0);
778 else {
779 ShAmt = 0;
780 ShOpcVal = ARM_AM::no_shift;
781 }
782 } else {
783 ShOpcVal = ARM_AM::no_shift;
784 }
785 }
786
787 // Try matching (R shl C) + (R).
788 if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
789 !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
790 N.getOperand(0).hasOneUse())) {
791 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
792 if (ShOpcVal != ARM_AM::no_shift) {
793 // Check to see if the RHS of the shift is a constant, if not, we can't
794 // fold it.
795 if (ConstantSDNode *Sh =
796 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
797 ShAmt = Sh->getZExtValue();
798 if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
799 Offset = N.getOperand(0).getOperand(0);
800 Base = N.getOperand(1);
801 } else {
802 ShAmt = 0;
803 ShOpcVal = ARM_AM::no_shift;
804 }
805 } else {
806 ShOpcVal = ARM_AM::no_shift;
807 }
808 }
809 }
810
811 // If Offset is a multiply-by-constant and it's profitable to extract a shift
812 // and use it in a shifted operand do so.
813 if (Offset.getOpcode() == ISD::MUL && N.hasOneUse()) {
814 unsigned PowerOfTwo = 0;
815 SDValue NewMulConst;
816 if (canExtractShiftFromMul(Offset, 31, PowerOfTwo, NewMulConst)) {
817 HandleSDNode Handle(Offset);
818 replaceDAGValue(Offset.getOperand(1), NewMulConst);
819 Offset = Handle.getValue();
820 ShAmt = PowerOfTwo;
821 ShOpcVal = ARM_AM::lsl;
822 }
823 }
824
825 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
826 SDLoc(N), MVT::i32);
827 return true;
828}
829
830bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
831 SDValue &Offset, SDValue &Opc) {
832 unsigned Opcode = Op->getOpcode();
833 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
834 ? cast<LoadSDNode>(Op)->getAddressingMode()
835 : cast<StoreSDNode>(Op)->getAddressingMode();
838 int Val;
839 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
840 return false;
841
842 Offset = N;
843 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
844 unsigned ShAmt = 0;
845 if (ShOpcVal != ARM_AM::no_shift) {
846 // Check to see if the RHS of the shift is a constant, if not, we can't fold
847 // it.
848 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
849 ShAmt = Sh->getZExtValue();
850 if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
851 Offset = N.getOperand(0);
852 else {
853 ShAmt = 0;
854 ShOpcVal = ARM_AM::no_shift;
855 }
856 } else {
857 ShOpcVal = ARM_AM::no_shift;
858 }
859 }
860
861 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
862 SDLoc(N), MVT::i32);
863 return true;
864}
865
// SelectAddrMode2OffsetImmPre - Match a constant offset in [0, 4096) for a
// pre-indexed AM2 access; the direction is applied by negating the value.
866bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
867 SDValue &Offset, SDValue &Opc) {
868 unsigned Opcode = Op->getOpcode();
869 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
870 ? cast<LoadSDNode>(Op)->getAddressingMode()
871 : cast<StoreSDNode>(Op)->getAddressingMode();
// NOTE(review): lines 872-873 are elided here; presumably they compute AddSub
// from AM — confirm in full source.
874 int Val;
875 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
876 if (AddSub == ARM_AM::sub) Val *= -1;
// No offset register is used; the immediate carries the whole offset.
877 Offset = CurDAG->getRegister(0, MVT::i32);
878 Opc = CurDAG->getTargetConstant(Val, SDLoc(Op), MVT::i32);
879 return true;
880 }
881
882 return false;
883}
884
885
// SelectAddrMode2OffsetImm - Match a constant offset in [0, 4096) for a
// post-indexed AM2 access, encoded via getAM2Opc.
886bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
887 SDValue &Offset, SDValue &Opc) {
888 unsigned Opcode = Op->getOpcode();
889 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
890 ? cast<LoadSDNode>(Op)->getAddressingMode()
891 : cast<StoreSDNode>(Op)->getAddressingMode();
// NOTE(review): lines 892-893 are elided here; presumably they compute AddSub
// from AM — confirm in full source.
894 int Val;
895 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
896 Offset = CurDAG->getRegister(0, MVT::i32);
// NOTE(review): line 898 (the remaining getAM2Opc argument, presumably
// ARM_AM::no_shift) is elided from this view.
897 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
899 SDLoc(Op), MVT::i32);
900 return true;
901 }
902
903 return false;
904}
905
906bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
907 Base = N;
908 return true;
909}
910
// SelectAddrMode3 - Match an ARM addressing-mode-3 address (base register plus
// either a register offset or a +/- imm8), used by halfword/doubleword
// loads/stores. Produces Base, Offset (reg or R0 placeholder) and the encoded
// AM3 opcode immediate.
// NOTE(review): line 912 (the Base/Offset reference parameters of the
// signature) is elided from this view.
911bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
913 SDValue &Opc) {
914 if (N.getOpcode() == ISD::SUB) {
915 // X - C is canonicalize to X + -C, no need to handle it here.
916 Base = N.getOperand(0);
917 Offset = N.getOperand(1);
918 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0), SDLoc(N),
919 MVT::i32);
920 return true;
921 }
922
923 if (!CurDAG->isBaseWithConstantOffset(N)) {
// Not base+constant: use the value itself as base with a zero offset.
924 Base = N;
925 if (N.getOpcode() == ISD::FrameIndex) {
926 int FI = cast<FrameIndexSDNode>(N)->getIndex();
927 Base = CurDAG->getTargetFrameIndex(
928 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
929 }
930 Offset = CurDAG->getRegister(0, MVT::i32);
931 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
932 MVT::i32);
933 return true;
934 }
935
936 // If the RHS is +/- imm8, fold into addr mode.
937 int RHSC;
938 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
939 -256 + 1, 256, RHSC)) { // 8 bits.
940 Base = N.getOperand(0);
941 if (Base.getOpcode() == ISD::FrameIndex) {
942 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
943 Base = CurDAG->getTargetFrameIndex(
944 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
945 }
946 Offset = CurDAG->getRegister(0, MVT::i32);
947
// NOTE(review): lines 948/950 are elided; presumably AddSub defaults to add
// and is switched to sub in the negative branch — confirm in full source.
949 if (RHSC < 0) {
951 RHSC = -RHSC;
952 }
953 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC), SDLoc(N),
954 MVT::i32);
955 return true;
956 }
957
// Constant out of imm8 range: fall back to register-offset form.
958 Base = N.getOperand(0);
959 Offset = N.getOperand(1);
960 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
961 MVT::i32);
962 return true;
963}
964
// SelectAddrMode3Offset - Match the offset operand of a pre/post-indexed AM3
// load/store: either a constant in [0, 256) folded into the opcode immediate,
// or a plain register offset.
965bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
966 SDValue &Offset, SDValue &Opc) {
967 unsigned Opcode = Op->getOpcode();
968 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
969 ? cast<LoadSDNode>(Op)->getAddressingMode()
970 : cast<StoreSDNode>(Op)->getAddressingMode();
// NOTE(review): lines 971-972 are elided here; presumably they compute AddSub
// from AM — confirm in full source.
973 int Val;
974 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
975 Offset = CurDAG->getRegister(0, MVT::i32);
976 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), SDLoc(Op),
977 MVT::i32);
978 return true;
979 }
980
// Non-constant (or out-of-range) offset: use the register form.
981 Offset = N;
982 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), SDLoc(Op),
983 MVT::i32);
984 return true;
985}
986
// IsAddressingMode5 - Shared matcher for VFP addressing mode 5: base register
// plus a +/- imm8 scaled by 4 (or by 2 when FP16). The offset operand carries
// the encoded AM5 opcode immediate, not a register. Always succeeds, falling
// back to base + zero offset.
987bool ARMDAGToDAGISel::IsAddressingMode5(SDValue N, SDValue &Base, SDValue &Offset,
988 bool FP16) {
989 if (!CurDAG->isBaseWithConstantOffset(N)) {
990 Base = N;
991 if (N.getOpcode() == ISD::FrameIndex) {
992 int FI = cast<FrameIndexSDNode>(N)->getIndex();
993 Base = CurDAG->getTargetFrameIndex(
994 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
995 } else if (N.getOpcode() == ARMISD::Wrapper &&
996 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
997 N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
998 N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
// Strip the wrapper for operands that can be used directly as a base.
999 Base = N.getOperand(0);
1000 }
1001 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
1002 SDLoc(N), MVT::i32);
1003 return true;
1004 }
1005
1006 // If the RHS is +/- imm8, fold into addr mode.
1007 int RHSC;
1008 const int Scale = FP16 ? 2 : 4;
1009
1010 if (isScaledConstantInRange(N.getOperand(1), Scale, -255, 256, RHSC)) {
1011 Base = N.getOperand(0);
1012 if (Base.getOpcode() == ISD::FrameIndex) {
1013 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1014 Base = CurDAG->getTargetFrameIndex(
1015 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1016 }
1017
// NOTE(review): lines 1018/1020 are elided; presumably AddSub defaults to add
// and becomes sub in the negative branch — confirm in full source.
1019 if (RHSC < 0) {
1021 RHSC = -RHSC;
1022 }
1023
1024 if (FP16)
1025 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5FP16Opc(AddSub, RHSC),
1026 SDLoc(N), MVT::i32);
1027 else
1028 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
1029 SDLoc(N), MVT::i32);
1030
1031 return true;
1032 }
1033
// Constant offset out of range: base-only with a zero AM5 immediate.
1034 Base = N;
1035
1036 if (FP16)
1037 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5FP16Opc(ARM_AM::add, 0),
1038 SDLoc(N), MVT::i32);
1039 else
1040 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
1041 SDLoc(N), MVT::i32);
1042
1043 return true;
1044}
1045
// SelectAddrMode5 - AM5 matcher for single/double-precision VFP accesses
// (offset scaled by 4). NOTE(review): line 1047 (the Base/Offset reference
// parameters) is elided from this view.
1046bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
1048 return IsAddressingMode5(N, Base, Offset, /*FP16=*/ false);
1049}
1050
// SelectAddrMode5FP16 - AM5 matcher for half-precision VFP accesses (offset
// scaled by 2). NOTE(review): line 1052 (the Base/Offset reference
// parameters) is elided from this view.
1051bool ARMDAGToDAGISel::SelectAddrMode5FP16(SDValue N,
1053 return IsAddressingMode5(N, Base, Offset, /*FP16=*/ true);
1054}
1055
1056bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
1057 SDValue &Align) {
1058 Addr = N;
1059
1060 unsigned Alignment = 0;
1061
1062 MemSDNode *MemN = cast<MemSDNode>(Parent);
1063
1064 if (isa<LSBaseSDNode>(MemN) ||
1065 ((MemN->getOpcode() == ARMISD::VST1_UPD ||
1066 MemN->getOpcode() == ARMISD::VLD1_UPD) &&
1067 MemN->getConstantOperandVal(MemN->getNumOperands() - 1) == 1)) {
1068 // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
1069 // The maximum alignment is equal to the memory size being referenced.
1070 llvm::Align MMOAlign = MemN->getAlign();
1071 unsigned MemSize = MemN->getMemoryVT().getSizeInBits() / 8;
1072 if (MMOAlign.value() >= MemSize && MemSize > 1)
1073 Alignment = MemSize;
1074 } else {
1075 // All other uses of addrmode6 are for intrinsics. For now just record
1076 // the raw alignment value; it will be refined later based on the legal
1077 // alignment operands for the intrinsic.
1078 Alignment = MemN->getAlign().value();
1079 }
1080
1081 Align = CurDAG->getTargetConstant(Alignment, SDLoc(N), MVT::i32);
1082 return true;
1083}
1084
// SelectAddrMode6Offset - Match the writeback offset of a post-incremented
// AM6 load/store. A constant increment equal to the access size selects the
// fixed-increment form (register 0); anything else is a register increment.
1085bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
1086 SDValue &Offset) {
1087 LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
// NOTE(review): line 1088 is elided here; presumably it reads AM from
// LdSt->getAddressingMode() — confirm in full source.
1089 if (AM != ISD::POST_INC)
1090 return false;
1091 Offset = N;
1092 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
1093 if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
1094 Offset = CurDAG->getRegister(0, MVT::i32);
1095 }
1096 return true;
1097}
1098
1099bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
1100 SDValue &Offset, SDValue &Label) {
1101 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
1102 Offset = N.getOperand(0);
1103 SDValue N1 = N.getOperand(1);
1104 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
1105 SDLoc(N), MVT::i32);
1106 return true;
1107 }
1108
1109 return false;
1110}
1111
1112
1113//===----------------------------------------------------------------------===//
1114// Thumb Addressing Modes
1115//===----------------------------------------------------------------------===//
1116
// NOTE(review): line 1117, the signature of this helper (presumably
// 'static bool shouldUseZeroOffsetLdSt(SDValue N) {'), is elided from this
// view; the body below decides whether an add-of-negative should instead be
// selected as a base-only (zero-offset) access.
1118 // Negative numbers are difficult to materialise in thumb1. If we are
1119 // selecting the add of a negative, instead try to select ri with a zero
1120 // offset, so create the add node directly which will become a sub.
1121 if (N.getOpcode() != ISD::ADD)
1122 return false;
1123
1124 // Look for an imm which is not legal for ld/st, but is legal for sub.
1125 if (auto C = dyn_cast<ConstantSDNode>(N.getOperand(1)))
1126 return C->getSExtValue() < 0 && C->getSExtValue() >= -255;
1127
1128 return false;
1129}
1130
1131bool ARMDAGToDAGISel::SelectThumbAddrModeRRSext(SDValue N, SDValue &Base,
1132 SDValue &Offset) {
1133 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
1134 if (!isNullConstant(N))
1135 return false;
1136
1137 Base = Offset = N;
1138 return true;
1139 }
1140
1141 Base = N.getOperand(0);
1142 Offset = N.getOperand(1);
1143 return true;
1144}
1145
// SelectThumbAddrModeRR - Reg+reg matcher that first rejects cases better
// served by the ri (zero-offset/sub) form, then defers to the Sext variant.
1146bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N, SDValue &Base,
1147 SDValue &Offset) {
// NOTE(review): line 1148 is elided here; presumably the guard is
// 'if (shouldUseZeroOffsetLdSt(N))' — confirm in full source.
1149 return false; // Select ri instead
1150 return SelectThumbAddrModeRRSext(N, Base, Offset);
1151}
1152
// SelectThumbAddrModeImm5S - Match a Thumb1 base + (imm5 * Scale) address.
// Fails (so register-offset selection can run) for plain ADDs with no
// foldable constant and for constants out of the imm5 range.
1153bool
1154ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
1155 SDValue &Base, SDValue &OffImm) {
// NOTE(review): line 1156 is elided here; presumably the guard is
// 'if (shouldUseZeroOffsetLdSt(N)) {' — confirm in full source.
1157 Base = N;
1158 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1159 return true;
1160 }
1161
1162 if (!CurDAG->isBaseWithConstantOffset(N)) {
1163 if (N.getOpcode() == ISD::ADD) {
1164 return false; // We want to select register offset instead
1165 } else if (N.getOpcode() == ARMISD::Wrapper &&
1166 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
1167 N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
1168 N.getOperand(0).getOpcode() != ISD::TargetConstantPool &&
1169 N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
// Strip the wrapper for operands usable directly as a base.
1170 Base = N.getOperand(0);
1171 } else {
1172 Base = N;
1173 }
1174
1175 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1176 return true;
1177 }
1178
1179 // If the RHS is + imm5 * scale, fold into addr mode.
1180 int RHSC;
1181 if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
1182 Base = N.getOperand(0);
1183 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
1184 return true;
1185 }
1186
1187 // Offset is too large, so use register offset instead.
1188 return false;
1189}
1190
1191bool
1192ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
1193 SDValue &OffImm) {
1194 return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
1195}
1196
1197bool
1198ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
1199 SDValue &OffImm) {
1200 return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
1201}
1202
1203bool
1204ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
1205 SDValue &OffImm) {
1206 return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
1207}
1208
1209bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
1210 SDValue &Base, SDValue &OffImm) {
1211 if (N.getOpcode() == ISD::FrameIndex) {
1212 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1213 // Only multiples of 4 are allowed for the offset, so the frame object
1214 // alignment must be at least 4.
1215 MachineFrameInfo &MFI = MF->getFrameInfo();
1216 if (MFI.getObjectAlign(FI) < Align(4))
1217 MFI.setObjectAlignment(FI, Align(4));
1218 Base = CurDAG->getTargetFrameIndex(
1219 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1220 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1221 return true;
1222 }
1223
1224 if (!CurDAG->isBaseWithConstantOffset(N))
1225 return false;
1226
1227 if (N.getOperand(0).getOpcode() == ISD::FrameIndex) {
1228 // If the RHS is + imm8 * scale, fold into addr mode.
1229 int RHSC;
1230 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
1231 Base = N.getOperand(0);
1232 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1233 // Make sure the offset is inside the object, or we might fail to
1234 // allocate an emergency spill slot. (An out-of-range access is UB, but
1235 // it could show up anyway.)
1236 MachineFrameInfo &MFI = MF->getFrameInfo();
1237 if (RHSC * 4 < MFI.getObjectSize(FI)) {
1238 // For LHS+RHS to result in an offset that's a multiple of 4 the object
1239 // indexed by the LHS must be 4-byte aligned.
1240 if (!MFI.isFixedObjectIndex(FI) && MFI.getObjectAlign(FI) < Align(4))
1241 MFI.setObjectAlignment(FI, Align(4));
1242 if (MFI.getObjectAlign(FI) >= Align(4)) {
1243 Base = CurDAG->getTargetFrameIndex(
1244 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1245 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
1246 return true;
1247 }
1248 }
1249 }
1250 }
1251
1252 return false;
1253}
1254
1255template <unsigned Shift>
1256bool ARMDAGToDAGISel::SelectTAddrModeImm7(SDValue N, SDValue &Base,
1257 SDValue &OffImm) {
1258 if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
1259 int RHSC;
1260 if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -0x7f, 0x80,
1261 RHSC)) {
1262 Base = N.getOperand(0);
1263 if (N.getOpcode() == ISD::SUB)
1264 RHSC = -RHSC;
1265 OffImm =
1266 CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
1267 return true;
1268 }
1269 }
1270
1271 // Base only.
1272 Base = N;
1273 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1274 return true;
1275}
1276
1277
1278//===----------------------------------------------------------------------===//
1279// Thumb 2 Addressing Modes
1280//===----------------------------------------------------------------------===//
1281
1282
1283bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
1284 SDValue &Base, SDValue &OffImm) {
1285 // Match simple R + imm12 operands.
1286
1287 // Base only.
1288 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1289 !CurDAG->isBaseWithConstantOffset(N)) {
1290 if (N.getOpcode() == ISD::FrameIndex) {
1291 // Match frame index.
1292 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1293 Base = CurDAG->getTargetFrameIndex(
1294 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1295 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1296 return true;
1297 }
1298
1299 if (N.getOpcode() == ARMISD::Wrapper &&
1300 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
1301 N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
1302 N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
1303 Base = N.getOperand(0);
1304 if (Base.getOpcode() == ISD::TargetConstantPool)
1305 return false; // We want to select t2LDRpci instead.
1306 } else
1307 Base = N;
1308 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1309 return true;
1310 }
1311
1312 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1313 if (SelectT2AddrModeImm8(N, Base, OffImm))
1314 // Let t2LDRi8 handle (R - imm8).
1315 return false;
1316
1317 int RHSC = (int)RHS->getZExtValue();
1318 if (N.getOpcode() == ISD::SUB)
1319 RHSC = -RHSC;
1320
1321 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
1322 Base = N.getOperand(0);
1323 if (Base.getOpcode() == ISD::FrameIndex) {
1324 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1325 Base = CurDAG->getTargetFrameIndex(
1326 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1327 }
1328 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
1329 return true;
1330 }
1331 }
1332
1333 // Base only.
1334 Base = N;
1335 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1336 return true;
1337}
1338
1339template <unsigned Shift>
1340bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N, SDValue &Base,
1341 SDValue &OffImm) {
1342 if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
1343 int RHSC;
1344 if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -255, 256, RHSC)) {
1345 Base = N.getOperand(0);
1346 if (Base.getOpcode() == ISD::FrameIndex) {
1347 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1348 Base = CurDAG->getTargetFrameIndex(
1349 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1350 }
1351
1352 if (N.getOpcode() == ISD::SUB)
1353 RHSC = -RHSC;
1354 OffImm =
1355 CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
1356 return true;
1357 }
1358 }
1359
1360 // Base only.
1361 Base = N;
1362 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1363 return true;
1364}
1365
1366bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
1367 SDValue &Base, SDValue &OffImm) {
1368 // Match simple R - imm8 operands.
1369 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1370 !CurDAG->isBaseWithConstantOffset(N))
1371 return false;
1372
1373 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1374 int RHSC = (int)RHS->getSExtValue();
1375 if (N.getOpcode() == ISD::SUB)
1376 RHSC = -RHSC;
1377
1378 if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
1379 Base = N.getOperand(0);
1380 if (Base.getOpcode() == ISD::FrameIndex) {
1381 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1382 Base = CurDAG->getTargetFrameIndex(
1383 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1384 }
1385 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
1386 return true;
1387 }
1388 }
1389
1390 return false;
1391}
1392
1393bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
1394 SDValue &OffImm){
1395 unsigned Opcode = Op->getOpcode();
1396 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
1397 ? cast<LoadSDNode>(Op)->getAddressingMode()
1398 : cast<StoreSDNode>(Op)->getAddressingMode();
1399 int RHSC;
1400 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
1401 OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
1402 ? CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32)
1403 : CurDAG->getTargetConstant(-RHSC, SDLoc(N), MVT::i32);
1404 return true;
1405 }
1406
1407 return false;
1408}
1409
1410template <unsigned Shift>
1411bool ARMDAGToDAGISel::SelectT2AddrModeImm7(SDValue N, SDValue &Base,
1412 SDValue &OffImm) {
1413 if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
1414 int RHSC;
1415 if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -0x7f, 0x80,
1416 RHSC)) {
1417 Base = N.getOperand(0);
1418 if (Base.getOpcode() == ISD::FrameIndex) {
1419 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1420 Base = CurDAG->getTargetFrameIndex(
1421 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1422 }
1423
1424 if (N.getOpcode() == ISD::SUB)
1425 RHSC = -RHSC;
1426 OffImm =
1427 CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
1428 return true;
1429 }
1430 }
1431
1432 // Base only.
1433 Base = N;
1434 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1435 return true;
1436}
1437
1438template <unsigned Shift>
1439bool ARMDAGToDAGISel::SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N,
1440 SDValue &OffImm) {
1441 return SelectT2AddrModeImm7Offset(Op, N, OffImm, Shift);
1442}
1443
// SelectT2AddrModeImm7Offset - Match the (imm7 << Shift) offset of an
// indexed (possibly masked) load/store; increments keep the value,
// decrements negate it.
1444bool ARMDAGToDAGISel::SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N,
1445 SDValue &OffImm,
1446 unsigned Shift) {
1447 unsigned Opcode = Op->getOpcode();
// NOTE(review): line 1448 is elided here; presumably it declares
// 'ISD::MemIndexedMode AM;' — confirm in full source.
1449 switch (Opcode) {
1450 case ISD::LOAD:
1451 AM = cast<LoadSDNode>(Op)->getAddressingMode();
1452 break;
1453 case ISD::STORE:
1454 AM = cast<StoreSDNode>(Op)->getAddressingMode();
1455 break;
1456 case ISD::MLOAD:
1457 AM = cast<MaskedLoadSDNode>(Op)->getAddressingMode();
1458 break;
1459 case ISD::MSTORE:
1460 AM = cast<MaskedStoreSDNode>(Op)->getAddressingMode();
1461 break;
1462 default:
1463 llvm_unreachable("Unexpected Opcode for Imm7Offset");
1464 }
1465
1466 int RHSC;
1467 // 7 bit constant, shifted by Shift.
1468 if (isScaledConstantInRange(N, 1 << Shift, 0, 0x80, RHSC)) {
1469 OffImm =
1470 ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
1471 ? CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32)
1472 : CurDAG->getTargetConstant(-RHSC * (1 << Shift), SDLoc(N),
1473 MVT::i32);
1474 return true;
1475 }
1476 return false;
1477}
1478
1479template <int Min, int Max>
1480bool ARMDAGToDAGISel::SelectImmediateInRange(SDValue N, SDValue &OffImm) {
1481 int Val;
1482 if (isScaledConstantInRange(N, 1, Min, Max, Val)) {
1483 OffImm = CurDAG->getTargetConstant(Val, SDLoc(N), MVT::i32);
1484 return true;
1485 }
1486 return false;
1487}
1488
// SelectT2AddrModeSoReg - Match a Thumb2 (R + R) or (R + (R << [1,2,3]))
// address, producing Base, OffReg and the shift-amount immediate.
1489bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
1490 SDValue &Base,
1491 SDValue &OffReg, SDValue &ShImm) {
1492 // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
1493 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
1494 return false;
1495
1496 // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
1497 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1498 int RHSC = (int)RHS->getZExtValue();
1499 if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
1500 return false;
1501 else if (RHSC < 0 && RHSC >= -255) // 8 bits
1502 return false;
1503 }
1504
1505 // Look for (R + R) or (R + (R << [1,2,3])).
1506 unsigned ShAmt = 0;
1507 Base = N.getOperand(0);
1508 OffReg = N.getOperand(1);
1509
1510 // Swap if it is ((R << c) + R).
// NOTE(review): line 1511 is elided here; presumably it initializes ShOpcVal
// from ARM_AM::getShiftOpcForNode(OffReg.getOpcode()) — confirm in source.
1512 if (ShOpcVal != ARM_AM::lsl) {
1513 ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
1514 if (ShOpcVal == ARM_AM::lsl)
1515 std::swap(Base, OffReg);
1516 }
1517
1518 if (ShOpcVal == ARM_AM::lsl) {
1519 // Check to see if the RHS of the shift is a constant, if not, we can't fold
1520 // it.
1521 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
1522 ShAmt = Sh->getZExtValue();
1523 if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
1524 OffReg = OffReg.getOperand(0);
1525 else {
1526 ShAmt = 0;
1527 }
1528 }
1529 }
1530
1531 // If OffReg is a multiply-by-constant and it's profitable to extract a shift
1532 // and use it in a shifted operand do so.
1533 if (OffReg.getOpcode() == ISD::MUL && N.hasOneUse()) {
1534 unsigned PowerOfTwo = 0;
1535 SDValue NewMulConst;
1536 if (canExtractShiftFromMul(OffReg, 3, PowerOfTwo, NewMulConst)) {
1537 HandleSDNode Handle(OffReg);
1538 replaceDAGValue(OffReg.getOperand(1), NewMulConst);
1539 OffReg = Handle.getValue();
1540 ShAmt = PowerOfTwo;
1541 }
1542 }
1543
1544 ShImm = CurDAG->getTargetConstant(ShAmt, SDLoc(N), MVT::i32);
1545
1546 return true;
1547}
1548
1549bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
1550 SDValue &OffImm) {
1551 // This *must* succeed since it's used for the irreplaceable ldrex and strex
1552 // instructions.
1553 Base = N;
1554 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1555
1556 if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
1557 return true;
1558
1559 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1560 if (!RHS)
1561 return true;
1562
1563 uint32_t RHSC = (int)RHS->getZExtValue();
1564 if (RHSC > 1020 || RHSC % 4 != 0)
1565 return true;
1566
1567 Base = N.getOperand(0);
1568 if (Base.getOpcode() == ISD::FrameIndex) {
1569 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1570 Base = CurDAG->getTargetFrameIndex(
1571 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1572 }
1573
1574 OffImm = CurDAG->getTargetConstant(RHSC/4, SDLoc(N), MVT::i32);
1575 return true;
1576}
1577
1578//===--------------------------------------------------------------------===//
1579
1580/// getAL - Returns a ARMCC::AL immediate node.
1581static inline SDValue getAL(SelectionDAG *CurDAG, const SDLoc &dl) {
1582 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, dl, MVT::i32);
1583}
1584
1585void ARMDAGToDAGISel::transferMemOperands(SDNode *N, SDNode *Result) {
1586 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
1587 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Result), {MemOp});
1588}
1589
1590bool ARMDAGToDAGISel::tryARMIndexedLoad(SDNode *N) {
1591 LoadSDNode *LD = cast<LoadSDNode>(N);
1592 ISD::MemIndexedMode AM = LD->getAddressingMode();
1593 if (AM == ISD::UNINDEXED)
1594 return false;
1595
1596 EVT LoadedVT = LD->getMemoryVT();
1597 SDValue Offset, AMOpc;
1598 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1599 unsigned Opcode = 0;
1600 bool Match = false;
1601 if (LoadedVT == MVT::i32 && isPre &&
1602 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1603 Opcode = ARM::LDR_PRE_IMM;
1604 Match = true;
1605 } else if (LoadedVT == MVT::i32 && !isPre &&
1606 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1607 Opcode = ARM::LDR_POST_IMM;
1608 Match = true;
1609 } else if (LoadedVT == MVT::i32 &&
1610 SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1611 Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
1612 Match = true;
1613
1614 } else if (LoadedVT == MVT::i16 &&
1615 SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1616 Match = true;
1617 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
1618 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
1619 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
1620 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
1621 if (LD->getExtensionType() == ISD::SEXTLOAD) {
1622 if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1623 Match = true;
1624 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
1625 }
1626 } else {
1627 if (isPre &&
1628 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1629 Match = true;
1630 Opcode = ARM::LDRB_PRE_IMM;
1631 } else if (!isPre &&
1632 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1633 Match = true;
1634 Opcode = ARM::LDRB_POST_IMM;
1635 } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1636 Match = true;
1637 Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
1638 }
1639 }
1640 }
1641
1642 if (Match) {
1643 if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
1644 SDValue Chain = LD->getChain();
1645 SDValue Base = LD->getBasePtr();
1646 SDValue Ops[]= { Base, AMOpc, getAL(CurDAG, SDLoc(N)),
1647 CurDAG->getRegister(0, MVT::i32), Chain };
1648 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1649 MVT::Other, Ops);
1650 transferMemOperands(N, New);
1651 ReplaceNode(N, New);
1652 return true;
1653 } else {
1654 SDValue Chain = LD->getChain();
1655 SDValue Base = LD->getBasePtr();
1656 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG, SDLoc(N)),
1657 CurDAG->getRegister(0, MVT::i32), Chain };
1658 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1659 MVT::Other, Ops);
1660 transferMemOperands(N, New);
1661 ReplaceNode(N, New);
1662 return true;
1663 }
1664 }
1665
1666 return false;
1667}
1668
1669bool ARMDAGToDAGISel::tryT1IndexedLoad(SDNode *N) {
1670 LoadSDNode *LD = cast<LoadSDNode>(N);
1671 EVT LoadedVT = LD->getMemoryVT();
1672 ISD::MemIndexedMode AM = LD->getAddressingMode();
1673 if (AM != ISD::POST_INC || LD->getExtensionType() != ISD::NON_EXTLOAD ||
1674 LoadedVT.getSimpleVT().SimpleTy != MVT::i32)
1675 return false;
1676
1677 auto *COffs = dyn_cast<ConstantSDNode>(LD->getOffset());
1678 if (!COffs || COffs->getZExtValue() != 4)
1679 return false;
1680
1681 // A T1 post-indexed load is just a single register LDM: LDM r0!, {r1}.
1682 // The encoding of LDM is not how the rest of ISel expects a post-inc load to
1683 // look however, so we use a pseudo here and switch it for a tLDMIA_UPD after
1684 // ISel.
1685 SDValue Chain = LD->getChain();
1686 SDValue Base = LD->getBasePtr();
1687 SDValue Ops[]= { Base, getAL(CurDAG, SDLoc(N)),
1688 CurDAG->getRegister(0, MVT::i32), Chain };
1689 SDNode *New = CurDAG->getMachineNode(ARM::tLDR_postidx, SDLoc(N), MVT::i32,
1690 MVT::i32, MVT::Other, Ops);
1691 transferMemOperands(N, New);
1692 ReplaceNode(N, New);
1693 return true;
1694}
1695
// tryT2IndexedLoad - Try to select a Thumb2 pre/post-indexed load for N,
// choosing the opcode by loaded width and extension kind. Returns true if
// the node was replaced.
1696bool ARMDAGToDAGISel::tryT2IndexedLoad(SDNode *N) {
1697 LoadSDNode *LD = cast<LoadSDNode>(N);
1698 ISD::MemIndexedMode AM = LD->getAddressingMode();
1699 if (AM == ISD::UNINDEXED)
1700 return false;
1701
1702 EVT LoadedVT = LD->getMemoryVT();
1703 bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
// NOTE(review): line 1704 is elided here; presumably it declares
// 'SDValue Offset;' used below — confirm in full source.
1705 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1706 unsigned Opcode = 0;
1707 bool Match = false;
1708 if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
1709 switch (LoadedVT.getSimpleVT().SimpleTy) {
1710 case MVT::i32:
1711 Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
1712 break;
1713 case MVT::i16:
1714 if (isSExtLd)
1715 Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
1716 else
1717 Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
1718 break;
1719 case MVT::i8:
1720 case MVT::i1:
1721 if (isSExtLd)
1722 Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
1723 else
1724 Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
1725 break;
1726 default:
1727 return false;
1728 }
1729 Match = true;
1730 }
1731
1732 if (Match) {
1733 SDValue Chain = LD->getChain();
1734 SDValue Base = LD->getBasePtr();
1735 SDValue Ops[]= { Base, Offset, getAL(CurDAG, SDLoc(N)),
1736 CurDAG->getRegister(0, MVT::i32), Chain };
1737 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1738 MVT::Other, Ops);
1739 transferMemOperands(N, New);
1740 ReplaceNode(N, New);
1741 return true;
1742 }
1743
1744 return false;
1745}
1746
1747bool ARMDAGToDAGISel::tryMVEIndexedLoad(SDNode *N) {
1748 EVT LoadedVT;
1749 unsigned Opcode = 0;
1750 bool isSExtLd, isPre;
1751 Align Alignment;
1752 ARMVCC::VPTCodes Pred;
1753 SDValue PredReg;
1754 SDValue Chain, Base, Offset;
1755
1756 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1757 ISD::MemIndexedMode AM = LD->getAddressingMode();
1758 if (AM == ISD::UNINDEXED)
1759 return false;
1760 LoadedVT = LD->getMemoryVT();
1761 if (!LoadedVT.isVector())
1762 return false;
1763
1764 Chain = LD->getChain();
1765 Base = LD->getBasePtr();
1766 Offset = LD->getOffset();
1767 Alignment = LD->getAlign();
1768 isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1769 isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1770 Pred = ARMVCC::None;
1771 PredReg = CurDAG->getRegister(0, MVT::i32);
1772 } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
1773 ISD::MemIndexedMode AM = LD->getAddressingMode();
1774 if (AM == ISD::UNINDEXED)
1775 return false;
1776 LoadedVT = LD->getMemoryVT();
1777 if (!LoadedVT.isVector())
1778 return false;
1779
1780 Chain = LD->getChain();
1781 Base = LD->getBasePtr();
1782 Offset = LD->getOffset();
1783 Alignment = LD->getAlign();
1784 isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1785 isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1786 Pred = ARMVCC::Then;
1787 PredReg = LD->getMask();
1788 } else
1789 llvm_unreachable("Expected a Load or a Masked Load!");
1790
1791 // We allow LE non-masked loads to change the type (for example use a vldrb.8
1792 // as opposed to a vldrw.32). This can allow extra addressing modes or
1793 // alignments for what is otherwise an equivalent instruction.
1794 bool CanChangeType = Subtarget->isLittle() && !isa<MaskedLoadSDNode>(N);
1795
1796 SDValue NewOffset;
1797 if (Alignment >= Align(2) && LoadedVT == MVT::v4i16 &&
1798 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1)) {
1799 if (isSExtLd)
1800 Opcode = isPre ? ARM::MVE_VLDRHS32_pre : ARM::MVE_VLDRHS32_post;
1801 else
1802 Opcode = isPre ? ARM::MVE_VLDRHU32_pre : ARM::MVE_VLDRHU32_post;
1803 } else if (LoadedVT == MVT::v8i8 &&
1804 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0)) {
1805 if (isSExtLd)
1806 Opcode = isPre ? ARM::MVE_VLDRBS16_pre : ARM::MVE_VLDRBS16_post;
1807 else
1808 Opcode = isPre ? ARM::MVE_VLDRBU16_pre : ARM::MVE_VLDRBU16_post;
1809 } else if (LoadedVT == MVT::v4i8 &&
1810 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0)) {
1811 if (isSExtLd)
1812 Opcode = isPre ? ARM::MVE_VLDRBS32_pre : ARM::MVE_VLDRBS32_post;
1813 else
1814 Opcode = isPre ? ARM::MVE_VLDRBU32_pre : ARM::MVE_VLDRBU32_post;
1815 } else if (Alignment >= Align(4) &&
1816 (CanChangeType || LoadedVT == MVT::v4i32 ||
1817 LoadedVT == MVT::v4f32) &&
1818 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 2))
1819 Opcode = isPre ? ARM::MVE_VLDRWU32_pre : ARM::MVE_VLDRWU32_post;
1820 else if (Alignment >= Align(2) &&
1821 (CanChangeType || LoadedVT == MVT::v8i16 ||
1822 LoadedVT == MVT::v8f16) &&
1823 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1))
1824 Opcode = isPre ? ARM::MVE_VLDRHU16_pre : ARM::MVE_VLDRHU16_post;
1825 else if ((CanChangeType || LoadedVT == MVT::v16i8) &&
1826 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0))
1827 Opcode = isPre ? ARM::MVE_VLDRBU8_pre : ARM::MVE_VLDRBU8_post;
1828 else
1829 return false;
1830
1831 SDValue Ops[] = {Base,
1832 NewOffset,
1833 CurDAG->getTargetConstant(Pred, SDLoc(N), MVT::i32),
1834 PredReg,
1835 CurDAG->getRegister(0, MVT::i32), // tp_reg
1836 Chain};
1837 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
1838 N->getValueType(0), MVT::Other, Ops);
1839 transferMemOperands(N, New);
1840 ReplaceUses(SDValue(N, 0), SDValue(New, 1));
1841 ReplaceUses(SDValue(N, 1), SDValue(New, 0));
1842 ReplaceUses(SDValue(N, 2), SDValue(New, 2));
1843 CurDAG->RemoveDeadNode(N);
1844 return true;
1845}
1846
1847/// Form a GPRPair pseudo register from a pair of GPR regs.
1848SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
1849 SDLoc dl(V0.getNode());
1850 SDValue RegClass =
1851 CurDAG->getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
1852 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, dl, MVT::i32);
1853 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, dl, MVT::i32);
1854 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1855 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1856}
1857
1858/// Form a D register from a pair of S registers.
1859SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1860 SDLoc dl(V0.getNode());
1861 SDValue RegClass =
1862 CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, dl, MVT::i32);
1863 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
1864 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
1865 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1866 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1867}
1868
1869/// Form a quad register from a pair of D registers.
1870SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1871 SDLoc dl(V0.getNode());
1872 SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, dl,
1873 MVT::i32);
1874 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
1875 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
1876 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1877 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1878}
1879
1880/// Form 4 consecutive D registers from a pair of Q registers.
1881SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1882 SDLoc dl(V0.getNode());
1883 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
1884 MVT::i32);
1885 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
1886 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
1887 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1888 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1889}
1890
1891/// Form 4 consecutive S registers.
1892SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
1893 SDValue V2, SDValue V3) {
1894 SDLoc dl(V0.getNode());
1895 SDValue RegClass =
1896 CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, dl, MVT::i32);
1897 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
1898 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
1899 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, dl, MVT::i32);
1900 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, dl, MVT::i32);
1901 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1902 V2, SubReg2, V3, SubReg3 };
1903 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1904}
1905
1906/// Form 4 consecutive D registers.
1907SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
1908 SDValue V2, SDValue V3) {
1909 SDLoc dl(V0.getNode());
1910 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
1911 MVT::i32);
1912 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
1913 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
1914 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, dl, MVT::i32);
1915 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, dl, MVT::i32);
1916 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1917 V2, SubReg2, V3, SubReg3 };
1918 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1919}
1920
1921/// Form 4 consecutive Q registers.
1922SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
1923 SDValue V2, SDValue V3) {
1924 SDLoc dl(V0.getNode());
1925 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, dl,
1926 MVT::i32);
1927 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
1928 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
1929 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, dl, MVT::i32);
1930 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, dl, MVT::i32);
1931 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1932 V2, SubReg2, V3, SubReg3 };
1933 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1934}
1935
1936/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
1937/// of a NEON VLD or VST instruction. The supported values depend on the
1938/// number of registers being loaded.
1939SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, const SDLoc &dl,
1940 unsigned NumVecs, bool is64BitVector) {
1941 unsigned NumRegs = NumVecs;
1942 if (!is64BitVector && NumVecs < 3)
1943 NumRegs *= 2;
1944
1945 unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1946 if (Alignment >= 32 && NumRegs == 4)
1947 Alignment = 32;
1948 else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
1949 Alignment = 16;
1950 else if (Alignment >= 8)
1951 Alignment = 8;
1952 else
1953 Alignment = 0;
1954
1955 return CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
1956}
1957
1958static bool isVLDfixed(unsigned Opc)
1959{
1960 switch (Opc) {
1961 default: return false;
1962 case ARM::VLD1d8wb_fixed : return true;
1963 case ARM::VLD1d16wb_fixed : return true;
1964 case ARM::VLD1d64Qwb_fixed : return true;
1965 case ARM::VLD1d32wb_fixed : return true;
1966 case ARM::VLD1d64wb_fixed : return true;
1967 case ARM::VLD1d8TPseudoWB_fixed : return true;
1968 case ARM::VLD1d16TPseudoWB_fixed : return true;
1969 case ARM::VLD1d32TPseudoWB_fixed : return true;
1970 case ARM::VLD1d64TPseudoWB_fixed : return true;
1971 case ARM::VLD1d8QPseudoWB_fixed : return true;
1972 case ARM::VLD1d16QPseudoWB_fixed : return true;
1973 case ARM::VLD1d32QPseudoWB_fixed : return true;
1974 case ARM::VLD1d64QPseudoWB_fixed : return true;
1975 case ARM::VLD1q8wb_fixed : return true;
1976 case ARM::VLD1q16wb_fixed : return true;
1977 case ARM::VLD1q32wb_fixed : return true;
1978 case ARM::VLD1q64wb_fixed : return true;
1979 case ARM::VLD1DUPd8wb_fixed : return true;
1980 case ARM::VLD1DUPd16wb_fixed : return true;
1981 case ARM::VLD1DUPd32wb_fixed : return true;
1982 case ARM::VLD1DUPq8wb_fixed : return true;
1983 case ARM::VLD1DUPq16wb_fixed : return true;
1984 case ARM::VLD1DUPq32wb_fixed : return true;
1985 case ARM::VLD2d8wb_fixed : return true;
1986 case ARM::VLD2d16wb_fixed : return true;
1987 case ARM::VLD2d32wb_fixed : return true;
1988 case ARM::VLD2q8PseudoWB_fixed : return true;
1989 case ARM::VLD2q16PseudoWB_fixed : return true;
1990 case ARM::VLD2q32PseudoWB_fixed : return true;
1991 case ARM::VLD2DUPd8wb_fixed : return true;
1992 case ARM::VLD2DUPd16wb_fixed : return true;
1993 case ARM::VLD2DUPd32wb_fixed : return true;
1994 case ARM::VLD2DUPq8OddPseudoWB_fixed: return true;
1995 case ARM::VLD2DUPq16OddPseudoWB_fixed: return true;
1996 case ARM::VLD2DUPq32OddPseudoWB_fixed: return true;
1997 }
1998}
1999
2000static bool isVSTfixed(unsigned Opc)
2001{
2002 switch (Opc) {
2003 default: return false;
2004 case ARM::VST1d8wb_fixed : return true;
2005 case ARM::VST1d16wb_fixed : return true;
2006 case ARM::VST1d32wb_fixed : return true;
2007 case ARM::VST1d64wb_fixed : return true;
2008 case ARM::VST1q8wb_fixed : return true;
2009 case ARM::VST1q16wb_fixed : return true;
2010 case ARM::VST1q32wb_fixed : return true;
2011 case ARM::VST1q64wb_fixed : return true;
2012 case ARM::VST1d8TPseudoWB_fixed : return true;
2013 case ARM::VST1d16TPseudoWB_fixed : return true;
2014 case ARM::VST1d32TPseudoWB_fixed : return true;
2015 case ARM::VST1d64TPseudoWB_fixed : return true;
2016 case ARM::VST1d8QPseudoWB_fixed : return true;
2017 case ARM::VST1d16QPseudoWB_fixed : return true;
2018 case ARM::VST1d32QPseudoWB_fixed : return true;
2019 case ARM::VST1d64QPseudoWB_fixed : return true;
2020 case ARM::VST2d8wb_fixed : return true;
2021 case ARM::VST2d16wb_fixed : return true;
2022 case ARM::VST2d32wb_fixed : return true;
2023 case ARM::VST2q8PseudoWB_fixed : return true;
2024 case ARM::VST2q16PseudoWB_fixed : return true;
2025 case ARM::VST2q32PseudoWB_fixed : return true;
2026 }
2027}
2028
// Get the register stride update opcode of a VLD/VST instruction that
// is otherwise equivalent to the given fixed stride updating instruction.
// A "_fixed" writeback form increments the base pointer by the implicit
// access size; its "_register" twin increments by an arbitrary register.
// Unhandled opcodes are returned unchanged.
static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
  assert((isVLDfixed(Opc) || isVSTfixed(Opc))
         && "Incorrect fixed stride updating instruction.");
  switch (Opc) {
  default: break;
  // VLD1 single/double/triple/quad register forms.
  case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
  case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
  case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
  case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
  case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
  case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
  case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
  case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;
  case ARM::VLD1d64Twb_fixed: return ARM::VLD1d64Twb_register;
  case ARM::VLD1d64Qwb_fixed: return ARM::VLD1d64Qwb_register;
  case ARM::VLD1d8TPseudoWB_fixed: return ARM::VLD1d8TPseudoWB_register;
  case ARM::VLD1d16TPseudoWB_fixed: return ARM::VLD1d16TPseudoWB_register;
  case ARM::VLD1d32TPseudoWB_fixed: return ARM::VLD1d32TPseudoWB_register;
  case ARM::VLD1d64TPseudoWB_fixed: return ARM::VLD1d64TPseudoWB_register;
  case ARM::VLD1d8QPseudoWB_fixed: return ARM::VLD1d8QPseudoWB_register;
  case ARM::VLD1d16QPseudoWB_fixed: return ARM::VLD1d16QPseudoWB_register;
  case ARM::VLD1d32QPseudoWB_fixed: return ARM::VLD1d32QPseudoWB_register;
  case ARM::VLD1d64QPseudoWB_fixed: return ARM::VLD1d64QPseudoWB_register;
  // VLD1/VLD2 duplicating (load-and-splat) forms.
  case ARM::VLD1DUPd8wb_fixed : return ARM::VLD1DUPd8wb_register;
  case ARM::VLD1DUPd16wb_fixed : return ARM::VLD1DUPd16wb_register;
  case ARM::VLD1DUPd32wb_fixed : return ARM::VLD1DUPd32wb_register;
  case ARM::VLD1DUPq8wb_fixed : return ARM::VLD1DUPq8wb_register;
  case ARM::VLD1DUPq16wb_fixed : return ARM::VLD1DUPq16wb_register;
  case ARM::VLD1DUPq32wb_fixed : return ARM::VLD1DUPq32wb_register;
  case ARM::VLD2DUPq8OddPseudoWB_fixed: return ARM::VLD2DUPq8OddPseudoWB_register;
  case ARM::VLD2DUPq16OddPseudoWB_fixed: return ARM::VLD2DUPq16OddPseudoWB_register;
  case ARM::VLD2DUPq32OddPseudoWB_fixed: return ARM::VLD2DUPq32OddPseudoWB_register;

  // VST1 single/double/triple/quad register forms.
  case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
  case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
  case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
  case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
  case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
  case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
  case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
  case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
  case ARM::VST1d8TPseudoWB_fixed: return ARM::VST1d8TPseudoWB_register;
  case ARM::VST1d16TPseudoWB_fixed: return ARM::VST1d16TPseudoWB_register;
  case ARM::VST1d32TPseudoWB_fixed: return ARM::VST1d32TPseudoWB_register;
  case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
  case ARM::VST1d8QPseudoWB_fixed: return ARM::VST1d8QPseudoWB_register;
  case ARM::VST1d16QPseudoWB_fixed: return ARM::VST1d16QPseudoWB_register;
  case ARM::VST1d32QPseudoWB_fixed: return ARM::VST1d32QPseudoWB_register;
  case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;

  // VLD2 two-element structure loads.
  case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
  case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
  case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
  case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
  case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
  case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;

  // VST2 two-element structure stores.
  case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
  case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
  case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
  case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
  case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
  case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;

  // VLD2 duplicating (load-and-splat) forms.
  case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
  case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
  case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
  }
  return Opc; // If not one we handle, return it unchanged.
}
2101
2102/// Returns true if the given increment is a Constant known to be equal to the
2103/// access size performed by a NEON load/store. This means the "[rN]!" form can
2104/// be used.
2105static bool isPerfectIncrement(SDValue Inc, EVT VecTy, unsigned NumVecs) {
2106 auto C = dyn_cast<ConstantSDNode>(Inc);
2107 return C && C->getZExtValue() == VecTy.getSizeInBits() / 8 * NumVecs;
2108}
2109
2110void ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
2111 const uint16_t *DOpcodes,
2112 const uint16_t *QOpcodes0,
2113 const uint16_t *QOpcodes1) {
2114 assert(Subtarget->hasNEON());
2115 assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
2116 SDLoc dl(N);
2117
2118 SDValue MemAddr, Align;
2119 bool IsIntrinsic = !isUpdating; // By coincidence, all supported updating
2120 // nodes are not intrinsics.
2121 unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
2122 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2123 return;
2124
2125 SDValue Chain = N->getOperand(0);
2126 EVT VT = N->getValueType(0);
2127 bool is64BitVector = VT.is64BitVector();
2128 Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);
2129
2130 unsigned OpcodeIndex;
2131 switch (VT.getSimpleVT().SimpleTy) {
2132 default: llvm_unreachable("unhandled vld type");
2133 // Double-register operations:
2134 case MVT::v8i8: OpcodeIndex = 0; break;
2135 case MVT::v4f16:
2136 case MVT::v4bf16:
2137 case MVT::v4i16: OpcodeIndex = 1; break;
2138 case MVT::v2f32:
2139 case MVT::v2i32: OpcodeIndex = 2; break;
2140 case MVT::v1i64: OpcodeIndex = 3; break;
2141 // Quad-register operations:
2142 case MVT::v16i8: OpcodeIndex = 0; break;
2143 case MVT::v8f16:
2144 case MVT::v8bf16:
2145 case MVT::v8i16: OpcodeIndex = 1; break;
2146 case MVT::v4f32:
2147 case MVT::v4i32: OpcodeIndex = 2; break;
2148 case MVT::v2f64:
2149 case MVT::v2i64: OpcodeIndex = 3; break;
2150 }
2151
2152 EVT ResTy;
2153 if (NumVecs == 1)
2154 ResTy = VT;
2155 else {
2156 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2157 if (!is64BitVector)
2158 ResTyElts *= 2;
2159 ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
2160 }
2161 std::vector<EVT> ResTys;
2162 ResTys.push_back(ResTy);
2163 if (isUpdating)
2164 ResTys.push_back(MVT::i32);
2165 ResTys.push_back(MVT::Other);
2166
2167 SDValue Pred = getAL(CurDAG, dl);
2168 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2169 SDNode *VLd;
2171
2172 // Double registers and VLD1/VLD2 quad registers are directly supported.
2173 if (is64BitVector || NumVecs <= 2) {
2174 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2175 QOpcodes0[OpcodeIndex]);
2176 Ops.push_back(MemAddr);
2177 Ops.push_back(Align);
2178 if (isUpdating) {
2179 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2180 bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs);
2181 if (!IsImmUpdate) {
2182 // We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
2183 // check for the opcode rather than the number of vector elements.
2184 if (isVLDfixed(Opc))
2186 Ops.push_back(Inc);
2187 // VLD1/VLD2 fixed increment does not need Reg0 so only include it in
2188 // the operands if not such an opcode.
2189 } else if (!isVLDfixed(Opc))
2190 Ops.push_back(Reg0);
2191 }
2192 Ops.push_back(Pred);
2193 Ops.push_back(Reg0);
2194 Ops.push_back(Chain);
2195 VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2196
2197 } else {
2198 // Otherwise, quad registers are loaded with two separate instructions,
2199 // where one loads the even registers and the other loads the odd registers.
2200 EVT AddrTy = MemAddr.getValueType();
2201
2202 // Load the even subregs. This is always an updating load, so that it
2203 // provides the address to the second load for the odd subregs.
2204 SDValue ImplDef =
2205 SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
2206 const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
2207 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
2208 ResTy, AddrTy, MVT::Other, OpsA);
2209 Chain = SDValue(VLdA, 2);
2210
2211 // Load the odd subregs.
2212 Ops.push_back(SDValue(VLdA, 1));
2213 Ops.push_back(Align);
2214 if (isUpdating) {
2215 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2216 assert(isa<ConstantSDNode>(Inc.getNode()) &&
2217 "only constant post-increment update allowed for VLD3/4");
2218 (void)Inc;
2219 Ops.push_back(Reg0);
2220 }
2221 Ops.push_back(SDValue(VLdA, 0));
2222 Ops.push_back(Pred);
2223 Ops.push_back(Reg0);
2224 Ops.push_back(Chain);
2225 VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops);
2226 }
2227
2228 // Transfer memoperands.
2229 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2230 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLd), {MemOp});
2231
2232 if (NumVecs == 1) {
2233 ReplaceNode(N, VLd);
2234 return;
2235 }
2236
2237 // Extract out the subregisters.
2238 SDValue SuperReg = SDValue(VLd, 0);
2239 static_assert(ARM::dsub_7 == ARM::dsub_0 + 7 &&
2240 ARM::qsub_3 == ARM::qsub_0 + 3,
2241 "Unexpected subreg numbering");
2242 unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
2243 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2244 ReplaceUses(SDValue(N, Vec),
2245 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
2246 ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
2247 if (isUpdating)
2248 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
2249 CurDAG->RemoveDeadNode(N);
2250}
2251
2252void ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
2253 const uint16_t *DOpcodes,
2254 const uint16_t *QOpcodes0,
2255 const uint16_t *QOpcodes1) {
2256 assert(Subtarget->hasNEON());
2257 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
2258 SDLoc dl(N);
2259
2260 SDValue MemAddr, Align;
2261 bool IsIntrinsic = !isUpdating; // By coincidence, all supported updating
2262 // nodes are not intrinsics.
2263 unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
2264 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
2265 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2266 return;
2267
2268 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2269
2270 SDValue Chain = N->getOperand(0);
2271 EVT VT = N->getOperand(Vec0Idx).getValueType();
2272 bool is64BitVector = VT.is64BitVector();
2273 Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);
2274
2275 unsigned OpcodeIndex;
2276 switch (VT.getSimpleVT().SimpleTy) {
2277 default: llvm_unreachable("unhandled vst type");
2278 // Double-register operations:
2279 case MVT::v8i8: OpcodeIndex = 0; break;
2280 case MVT::v4f16:
2281 case MVT::v4bf16:
2282 case MVT::v4i16: OpcodeIndex = 1; break;
2283 case MVT::v2f32:
2284 case MVT::v2i32: OpcodeIndex = 2; break;
2285 case MVT::v1i64: OpcodeIndex = 3; break;
2286 // Quad-register operations:
2287 case MVT::v16i8: OpcodeIndex = 0; break;
2288 case MVT::v8f16:
2289 case MVT::v8bf16:
2290 case MVT::v8i16: OpcodeIndex = 1; break;
2291 case MVT::v4f32:
2292 case MVT::v4i32: OpcodeIndex = 2; break;
2293 case MVT::v2f64:
2294 case MVT::v2i64: OpcodeIndex = 3; break;
2295 }
2296
2297 std::vector<EVT> ResTys;
2298 if (isUpdating)
2299 ResTys.push_back(MVT::i32);
2300 ResTys.push_back(MVT::Other);
2301
2302 SDValue Pred = getAL(CurDAG, dl);
2303 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2305
2306 // Double registers and VST1/VST2 quad registers are directly supported.
2307 if (is64BitVector || NumVecs <= 2) {
2308 SDValue SrcReg;
2309 if (NumVecs == 1) {
2310 SrcReg = N->getOperand(Vec0Idx);
2311 } else if (is64BitVector) {
2312 // Form a REG_SEQUENCE to force register allocation.
2313 SDValue V0 = N->getOperand(Vec0Idx + 0);
2314 SDValue V1 = N->getOperand(Vec0Idx + 1);
2315 if (NumVecs == 2)
2316 SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
2317 else {
2318 SDValue V2 = N->getOperand(Vec0Idx + 2);
2319 // If it's a vst3, form a quad D-register and leave the last part as
2320 // an undef.
2321 SDValue V3 = (NumVecs == 3)
2322 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
2323 : N->getOperand(Vec0Idx + 3);
2324 SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2325 }
2326 } else {
2327 // Form a QQ register.
2328 SDValue Q0 = N->getOperand(Vec0Idx);
2329 SDValue Q1 = N->getOperand(Vec0Idx + 1);
2330 SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0);
2331 }
2332
2333 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2334 QOpcodes0[OpcodeIndex]);
2335 Ops.push_back(MemAddr);
2336 Ops.push_back(Align);
2337 if (isUpdating) {
2338 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2339 bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs);
2340 if (!IsImmUpdate) {
2341 // We use a VST1 for v1i64 even if the pseudo says VST2/3/4, so
2342 // check for the opcode rather than the number of vector elements.
2343 if (isVSTfixed(Opc))
2345 Ops.push_back(Inc);
2346 }
2347 // VST1/VST2 fixed increment does not need Reg0 so only include it in
2348 // the operands if not such an opcode.
2349 else if (!isVSTfixed(Opc))
2350 Ops.push_back(Reg0);
2351 }
2352 Ops.push_back(SrcReg);
2353 Ops.push_back(Pred);
2354 Ops.push_back(Reg0);
2355 Ops.push_back(Chain);
2356 SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2357
2358 // Transfer memoperands.
2359 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VSt), {MemOp});
2360
2361 ReplaceNode(N, VSt);
2362 return;
2363 }
2364
2365 // Otherwise, quad registers are stored with two separate instructions,
2366 // where one stores the even registers and the other stores the odd registers.
2367
2368 // Form the QQQQ REG_SEQUENCE.
2369 SDValue V0 = N->getOperand(Vec0Idx + 0);
2370 SDValue V1 = N->getOperand(Vec0Idx + 1);
2371 SDValue V2 = N->getOperand(Vec0Idx + 2);
2372 SDValue V3 = (NumVecs == 3)
2373 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2374 : N->getOperand(Vec0Idx + 3);
2375 SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2376
2377 // Store the even D registers. This is always an updating store, so that it
2378 // provides the address to the second store for the odd subregs.
2379 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
2380 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
2381 MemAddr.getValueType(),
2382 MVT::Other, OpsA);
2383 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VStA), {MemOp});
2384 Chain = SDValue(VStA, 1);
2385
2386 // Store the odd D registers.
2387 Ops.push_back(SDValue(VStA, 0));
2388 Ops.push_back(Align);
2389 if (isUpdating) {
2390 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2391 assert(isa<ConstantSDNode>(Inc.getNode()) &&
2392 "only constant post-increment update allowed for VST3/4");
2393 (void)Inc;
2394 Ops.push_back(Reg0);
2395 }
2396 Ops.push_back(RegSeq);
2397 Ops.push_back(Pred);
2398 Ops.push_back(Reg0);
2399 Ops.push_back(Chain);
2400 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
2401 Ops);
2402 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VStB), {MemOp});
2403 ReplaceNode(N, VStB);
2404}
2405
2406void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
2407 unsigned NumVecs,
2408 const uint16_t *DOpcodes,
2409 const uint16_t *QOpcodes) {
2410 assert(Subtarget->hasNEON());
2411 assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
2412 SDLoc dl(N);
2413
2414 SDValue MemAddr, Align;
2415 bool IsIntrinsic = !isUpdating; // By coincidence, all supported updating
2416 // nodes are not intrinsics.
2417 unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
2418 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
2419 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2420 return;
2421
2422 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2423
2424 SDValue Chain = N->getOperand(0);
2425 unsigned Lane =
2426 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
2427 EVT VT = N->getOperand(Vec0Idx).getValueType();
2428 bool is64BitVector = VT.is64BitVector();
2429
2430 unsigned Alignment = 0;
2431 if (NumVecs != 3) {
2432 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2433 unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
2434 if (Alignment > NumBytes)
2435 Alignment = NumBytes;
2436 if (Alignment < 8 && Alignment < NumBytes)
2437 Alignment = 0;
2438 // Alignment must be a power of two; make sure of that.
2439 Alignment = (Alignment & -Alignment);
2440 if (Alignment == 1)
2441 Alignment = 0;
2442 }
2443 Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
2444
2445 unsigned OpcodeIndex;
2446 switch (VT.getSimpleVT().SimpleTy) {
2447 default: llvm_unreachable("unhandled vld/vst lane type");
2448 // Double-register operations:
2449 case MVT::v8i8: OpcodeIndex = 0; break;
2450 case MVT::v4f16:
2451 case MVT::v4bf16:
2452 case MVT::v4i16: OpcodeIndex = 1; break;
2453 case MVT::v2f32:
2454 case MVT::v2i32: OpcodeIndex = 2; break;
2455 // Quad-register operations:
2456 case MVT::v8f16:
2457 case MVT::v8bf16:
2458 case MVT::v8i16: OpcodeIndex = 0; break;
2459 case MVT::v4f32:
2460 case MVT::v4i32: OpcodeIndex = 1; break;
2461 }
2462
2463 std::vector<EVT> ResTys;
2464 if (IsLoad) {
2465 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2466 if (!is64BitVector)
2467 ResTyElts *= 2;
2468 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
2469 MVT::i64, ResTyElts));
2470 }
2471 if (isUpdating)
2472 ResTys.push_back(MVT::i32);
2473 ResTys.push_back(MVT::Other);
2474
2475 SDValue Pred = getAL(CurDAG, dl);
2476 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2477
2479 Ops.push_back(MemAddr);
2480 Ops.push_back(Align);
2481 if (isUpdating) {
2482 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2483 bool IsImmUpdate =
2484 isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs);
2485 Ops.push_back(IsImmUpdate ? Reg0 : Inc);
2486 }
2487
2488 SDValue SuperReg;
2489 SDValue V0 = N->getOperand(Vec0Idx + 0);
2490 SDValue V1 = N->getOperand(Vec0Idx + 1);
2491 if (NumVecs == 2) {
2492 if (is64BitVector)
2493 SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
2494 else
2495 SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0);
2496 } else {
2497 SDValue V2 = N->getOperand(Vec0Idx + 2);
2498 SDValue V3 = (NumVecs == 3)
2499 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2500 : N->getOperand(Vec0Idx + 3);
2501 if (is64BitVector)
2502 SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2503 else
2504 SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2505 }
2506 Ops.push_back(SuperReg);
2507 Ops.push_back(getI32Imm(Lane, dl));
2508 Ops.push_back(Pred);
2509 Ops.push_back(Reg0);
2510 Ops.push_back(Chain);
2511
2512 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2513 QOpcodes[OpcodeIndex]);
2514 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2515 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLdLn), {MemOp});
2516 if (!IsLoad) {
2517 ReplaceNode(N, VLdLn);
2518 return;
2519 }
2520
2521 // Extract the subregisters.
2522 SuperReg = SDValue(VLdLn, 0);
2523 static_assert(ARM::dsub_7 == ARM::dsub_0 + 7 &&
2524 ARM::qsub_3 == ARM::qsub_0 + 3,
2525 "Unexpected subreg numbering");
2526 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
2527 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2528 ReplaceUses(SDValue(N, Vec),
2529 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
2530 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
2531 if (isUpdating)
2532 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
2533 CurDAG->RemoveDeadNode(N);
2534}
2535
2536template <typename SDValueVector>
2537void ARMDAGToDAGISel::AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2538 SDValue PredicateMask) {
2539 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32));
2540 Ops.push_back(PredicateMask);
2541 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2542}
2543
2544template <typename SDValueVector>
2545void ARMDAGToDAGISel::AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2546 SDValue PredicateMask,
2547 SDValue Inactive) {
2548 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32));
2549 Ops.push_back(PredicateMask);
2550 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2551 Ops.push_back(Inactive);
2552}
2553
2554template <typename SDValueVector>
2555void ARMDAGToDAGISel::AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc) {
2556 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32));
2557 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2558 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2559}
2560
2561template <typename SDValueVector>
2562void ARMDAGToDAGISel::AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2563 EVT InactiveTy) {
2564 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32));
2565 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2566 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2567 Ops.push_back(SDValue(
2568 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, Loc, InactiveTy), 0));
2569}
2570
2571void ARMDAGToDAGISel::SelectMVE_WB(SDNode *N, const uint16_t *Opcodes,
2572 bool Predicated) {
2573 SDLoc Loc(N);
2575
2576 uint16_t Opcode;
2577 switch (N->getValueType(1).getVectorElementType().getSizeInBits()) {
2578 case 32:
2579 Opcode = Opcodes[0];
2580 break;
2581 case 64:
2582 Opcode = Opcodes[1];
2583 break;
2584 default:
2585 llvm_unreachable("bad vector element size in SelectMVE_WB");
2586 }
2587
2588 Ops.push_back(N->getOperand(2)); // vector of base addresses
2589
2590 int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
2591 Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate offset
2592
2593 if (Predicated)
2594 AddMVEPredicateToOps(Ops, Loc, N->getOperand(4));
2595 else
2596 AddEmptyMVEPredicateToOps(Ops, Loc);
2597
2598 Ops.push_back(N->getOperand(0)); // chain
2599
2601 VTs.push_back(N->getValueType(1));
2602 VTs.push_back(N->getValueType(0));
2603 VTs.push_back(N->getValueType(2));
2604
2605 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), VTs, Ops);
2606 ReplaceUses(SDValue(N, 0), SDValue(New, 1));
2607 ReplaceUses(SDValue(N, 1), SDValue(New, 0));
2608 ReplaceUses(SDValue(N, 2), SDValue(New, 2));
2609 transferMemOperands(N, New);
2610 CurDAG->RemoveDeadNode(N);
2611}
2612
2613void ARMDAGToDAGISel::SelectMVE_LongShift(SDNode *N, uint16_t Opcode,
2614 bool Immediate,
2615 bool HasSaturationOperand) {
2616 SDLoc Loc(N);
2618
2619 // Two 32-bit halves of the value to be shifted
2620 Ops.push_back(N->getOperand(1));
2621 Ops.push_back(N->getOperand(2));
2622
2623 // The shift count
2624 if (Immediate) {
2625 int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
2626 Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count
2627 } else {
2628 Ops.push_back(N->getOperand(3));
2629 }
2630
2631 // The immediate saturation operand, if any
2632 if (HasSaturationOperand) {
2633 int32_t SatOp = cast<ConstantSDNode>(N->getOperand(4))->getZExtValue();
2634 int SatBit = (SatOp == 64 ? 0 : 1);
2635 Ops.push_back(getI32Imm(SatBit, Loc));
2636 }
2637
2638 // MVE scalar shifts are IT-predicable, so include the standard
2639 // predicate arguments.
2640 Ops.push_back(getAL(CurDAG, Loc));
2641 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2642
2643 CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
2644}
2645
2646void ARMDAGToDAGISel::SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
2647 uint16_t OpcodeWithNoCarry,
2648 bool Add, bool Predicated) {
2649 SDLoc Loc(N);
2651 uint16_t Opcode;
2652
2653 unsigned FirstInputOp = Predicated ? 2 : 1;
2654
2655 // Two input vectors and the input carry flag
2656 Ops.push_back(N->getOperand(FirstInputOp));
2657 Ops.push_back(N->getOperand(FirstInputOp + 1));
2658 SDValue CarryIn = N->getOperand(FirstInputOp + 2);
2659 ConstantSDNode *CarryInConstant = dyn_cast<ConstantSDNode>(CarryIn);
2660 uint32_t CarryMask = 1 << 29;
2661 uint32_t CarryExpected = Add ? 0 : CarryMask;
2662 if (CarryInConstant &&
2663 (CarryInConstant->getZExtValue() & CarryMask) == CarryExpected) {
2664 Opcode = OpcodeWithNoCarry;
2665 } else {
2666 Ops.push_back(CarryIn);
2667 Opcode = OpcodeWithCarry;
2668 }
2669
2670 if (Predicated)
2671 AddMVEPredicateToOps(Ops, Loc,
2672 N->getOperand(FirstInputOp + 3), // predicate
2673 N->getOperand(FirstInputOp - 1)); // inactive
2674 else
2675 AddEmptyMVEPredicateToOps(Ops, Loc, N->getValueType(0));
2676
2677 CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
2678}
2679
2680void ARMDAGToDAGISel::SelectMVE_VSHLC(SDNode *N, bool Predicated) {
2681 SDLoc Loc(N);
2683
2684 // One vector input, followed by a 32-bit word of bits to shift in
2685 // and then an immediate shift count
2686 Ops.push_back(N->getOperand(1));
2687 Ops.push_back(N->getOperand(2));
2688 int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
2689 Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count
2690
2691 if (Predicated)
2692 AddMVEPredicateToOps(Ops, Loc, N->getOperand(4));
2693 else
2694 AddEmptyMVEPredicateToOps(Ops, Loc);
2695
2696 CurDAG->SelectNodeTo(N, ARM::MVE_VSHLC, N->getVTList(), ArrayRef(Ops));
2697}
2698
2699static bool SDValueToConstBool(SDValue SDVal) {
2700 assert(isa<ConstantSDNode>(SDVal) && "expected a compile-time constant");
2701 ConstantSDNode *SDValConstant = dyn_cast<ConstantSDNode>(SDVal);
2702 uint64_t Value = SDValConstant->getZExtValue();
2703 assert((Value == 0 || Value == 1) && "expected value 0 or 1");
2704 return Value;
2705}
2706
2707void ARMDAGToDAGISel::SelectBaseMVE_VMLLDAV(SDNode *N, bool Predicated,
2708 const uint16_t *OpcodesS,
2709 const uint16_t *OpcodesU,
2710 size_t Stride, size_t TySize) {
2711 assert(TySize < Stride && "Invalid TySize");
2712 bool IsUnsigned = SDValueToConstBool(N->getOperand(1));
2713 bool IsSub = SDValueToConstBool(N->getOperand(2));
2714 bool IsExchange = SDValueToConstBool(N->getOperand(3));
2715 if (IsUnsigned) {
2716 assert(!IsSub &&
2717 "Unsigned versions of vmlsldav[a]/vrmlsldavh[a] do not exist");
2718 assert(!IsExchange &&
2719 "Unsigned versions of vmlaldav[a]x/vrmlaldavh[a]x do not exist");
2720 }
2721
2722 auto OpIsZero = [N](size_t OpNo) {
2723 return isNullConstant(N->getOperand(OpNo));
2724 };
2725
2726 // If the input accumulator value is not zero, select an instruction with
2727 // accumulator, otherwise select an instruction without accumulator
2728 bool IsAccum = !(OpIsZero(4) && OpIsZero(5));
2729
2730 const uint16_t *Opcodes = IsUnsigned ? OpcodesU : OpcodesS;
2731 if (IsSub)
2732 Opcodes += 4 * Stride;
2733 if (IsExchange)
2734 Opcodes += 2 * Stride;
2735 if (IsAccum)
2736 Opcodes += Stride;
2737 uint16_t Opcode = Opcodes[TySize];
2738
2739 SDLoc Loc(N);
2741 // Push the accumulator operands, if they are used
2742 if (IsAccum) {
2743 Ops.push_back(N->getOperand(4));
2744 Ops.push_back(N->getOperand(5));
2745 }
2746 // Push the two vector operands
2747 Ops.push_back(N->getOperand(6));
2748 Ops.push_back(N->getOperand(7));
2749
2750 if (Predicated)
2751 AddMVEPredicateToOps(Ops, Loc, N->getOperand(8));
2752 else
2753 AddEmptyMVEPredicateToOps(Ops, Loc);
2754
2755 CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
2756}
2757
2758void ARMDAGToDAGISel::SelectMVE_VMLLDAV(SDNode *N, bool Predicated,
2759 const uint16_t *OpcodesS,
2760 const uint16_t *OpcodesU) {
2761 EVT VecTy = N->getOperand(6).getValueType();
2762 size_t SizeIndex;
2763 switch (VecTy.getVectorElementType().getSizeInBits()) {
2764 case 16:
2765 SizeIndex = 0;
2766 break;
2767 case 32:
2768 SizeIndex = 1;
2769 break;
2770 default:
2771 llvm_unreachable("bad vector element size");
2772 }
2773
2774 SelectBaseMVE_VMLLDAV(N, Predicated, OpcodesS, OpcodesU, 2, SizeIndex);
2775}
2776
2777void ARMDAGToDAGISel::SelectMVE_VRMLLDAVH(SDNode *N, bool Predicated,
2778 const uint16_t *OpcodesS,
2779 const uint16_t *OpcodesU) {
2780 assert(
2781 N->getOperand(6).getValueType().getVectorElementType().getSizeInBits() ==
2782 32 &&
2783 "bad vector element size");
2784 SelectBaseMVE_VMLLDAV(N, Predicated, OpcodesS, OpcodesU, 1, 0);
2785}
2786
/// Select an MVE VLD2/VLD4 structured load. The load of \p NumVecs vectors
/// is emitted as a chain of machine instructions (one per stage), each
/// taking the previous stage's tuple value and chain as inputs; if
/// \p HasWriteback is set, the final instruction also produces the updated
/// pointer. \p Opcodes is indexed first by element size, then by stage.
void ARMDAGToDAGISel::SelectMVE_VLD(SDNode *N, unsigned NumVecs,
                                    const uint16_t *const *Opcodes,
                                    bool HasWriteback) {
  EVT VT = N->getValueType(0);
  SDLoc Loc(N);

  // Choose the opcode table for this element size.
  const uint16_t *OurOpcodes;
  switch (VT.getVectorElementType().getSizeInBits()) {
  case 8:
    OurOpcodes = Opcodes[0];
    break;
  case 16:
    OurOpcodes = Opcodes[1];
    break;
  case 32:
    OurOpcodes = Opcodes[2];
    break;
  default:
    llvm_unreachable("bad vector element size in SelectMVE_VLD");
  }

  // The full set of loaded vectors is modelled as one wide value of
  // NumVecs * 2 i64 elements, from which the results are extracted below.
  EVT DataTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, NumVecs * 2);
  SmallVector<EVT, 4> ResultTys = {DataTy, MVT::Other};
  unsigned PtrOperand = HasWriteback ? 1 : 2;

  // Start from an undefined tuple; each stage refines it.
  auto Data = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, Loc, DataTy), 0);
  SDValue Chain = N->getOperand(0);
  // Add a MVE_VLDn instruction for each Vec, except the last
  for (unsigned Stage = 0; Stage < NumVecs - 1; ++Stage) {
    SDValue Ops[] = {Data, N->getOperand(PtrOperand), Chain};
    auto LoadInst =
        CurDAG->getMachineNode(OurOpcodes[Stage], Loc, ResultTys, Ops);
    Data = SDValue(LoadInst, 0);
    Chain = SDValue(LoadInst, 1);
    transferMemOperands(N, LoadInst);
  }
  // The last may need a writeback on it
  if (HasWriteback)
    ResultTys = {DataTy, MVT::i32, MVT::Other};
  SDValue Ops[] = {Data, N->getOperand(PtrOperand), Chain};
  auto LoadInst =
      CurDAG->getMachineNode(OurOpcodes[NumVecs - 1], Loc, ResultTys, Ops);
  transferMemOperands(N, LoadInst);

  // Extract each loaded vector from the wide tuple, then map the optional
  // writeback pointer and the chain onto the remaining results of N.
  unsigned i;
  for (i = 0; i < NumVecs; i++)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(ARM::qsub_0 + i, Loc, VT,
                                               SDValue(LoadInst, 0)));
  if (HasWriteback)
    ReplaceUses(SDValue(N, i++), SDValue(LoadInst, 1));
  ReplaceUses(SDValue(N, i), SDValue(LoadInst, HasWriteback ? 2 : 1));
  CurDAG->RemoveDeadNode(N);
}
2842
2843void ARMDAGToDAGISel::SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
2844 bool Wrapping, bool Predicated) {
2845 EVT VT = N->getValueType(0);
2846 SDLoc Loc(N);
2847
2848 uint16_t Opcode;
2849 switch (VT.getScalarSizeInBits()) {
2850 case 8:
2851 Opcode = Opcodes[0];
2852 break;
2853 case 16:
2854 Opcode = Opcodes[1];
2855 break;
2856 case 32:
2857 Opcode = Opcodes[2];
2858 break;
2859 default:
2860 llvm_unreachable("bad vector element size in SelectMVE_VxDUP");
2861 }
2862
2864 unsigned OpIdx = 1;
2865
2866 SDValue Inactive;
2867 if (Predicated)
2868 Inactive = N->getOperand(OpIdx++);
2869
2870 Ops.push_back(N->getOperand(OpIdx++)); // base
2871 if (Wrapping)
2872 Ops.push_back(N->getOperand(OpIdx++)); // limit
2873
2874 SDValue ImmOp = N->getOperand(OpIdx++); // step
2875 int ImmValue = cast<ConstantSDNode>(ImmOp)->getZExtValue();
2876 Ops.push_back(getI32Imm(ImmValue, Loc));
2877
2878 if (Predicated)
2879 AddMVEPredicateToOps(Ops, Loc, N->getOperand(OpIdx), Inactive);
2880 else
2881 AddEmptyMVEPredicateToOps(Ops, Loc, N->getValueType(0));
2882
2883 CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
2884}
2885
2886void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
2887 size_t NumExtraOps, bool HasAccum) {
2888 bool IsBigEndian = CurDAG->getDataLayout().isBigEndian();
2889 SDLoc Loc(N);
2891
2892 unsigned OpIdx = 1;
2893
2894 // Convert and append the immediate operand designating the coprocessor.
2895 SDValue ImmCorpoc = N->getOperand(OpIdx++);
2896 uint32_t ImmCoprocVal = cast<ConstantSDNode>(ImmCorpoc)->getZExtValue();
2897 Ops.push_back(getI32Imm(ImmCoprocVal, Loc));
2898
2899 // For accumulating variants copy the low and high order parts of the
2900 // accumulator into a register pair and add it to the operand vector.
2901 if (HasAccum) {
2902 SDValue AccLo = N->getOperand(OpIdx++);
2903 SDValue AccHi = N->getOperand(OpIdx++);
2904 if (IsBigEndian)
2905 std::swap(AccLo, AccHi);
2906 Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, AccLo, AccHi), 0));
2907 }
2908
2909 // Copy extra operands as-is.
2910 for (size_t I = 0; I < NumExtraOps; I++)
2911 Ops.push_back(N->getOperand(OpIdx++));
2912
2913 // Convert and append the immediate operand
2914 SDValue Imm = N->getOperand(OpIdx);
2915 uint32_t ImmVal = cast<ConstantSDNode>(Imm)->getZExtValue();
2916 Ops.push_back(getI32Imm(ImmVal, Loc));
2917
2918 // Accumulating variants are IT-predicable, add predicate operands.
2919 if (HasAccum) {
2920 SDValue Pred = getAL(CurDAG, Loc);
2921 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2922 Ops.push_back(Pred);
2923 Ops.push_back(PredReg);
2924 }
2925
2926 // Create the CDE intruction
2927 SDNode *InstrNode = CurDAG->getMachineNode(Opcode, Loc, MVT::Untyped, Ops);
2928 SDValue ResultPair = SDValue(InstrNode, 0);
2929
2930 // The original intrinsic had two outputs, and the output of the dual-register
2931 // CDE instruction is a register pair. We need to extract the two subregisters
2932 // and replace all uses of the original outputs with the extracted
2933 // subregisters.
2934 uint16_t SubRegs[2] = {ARM::gsub_0, ARM::gsub_1};
2935 if (IsBigEndian)
2936 std::swap(SubRegs[0], SubRegs[1]);
2937
2938 for (size_t ResIdx = 0; ResIdx < 2; ResIdx++) {
2939 if (SDValue(N, ResIdx).use_empty())
2940 continue;
2941 SDValue SubReg = CurDAG->getTargetExtractSubreg(SubRegs[ResIdx], Loc,
2942 MVT::i32, ResultPair);
2943 ReplaceUses(SDValue(N, ResIdx), SubReg);
2944 }
2945
2946 CurDAG->RemoveDeadNode(N);
2947}
2948
2949void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool IsIntrinsic,
2950 bool isUpdating, unsigned NumVecs,
2951 const uint16_t *DOpcodes,
2952 const uint16_t *QOpcodes0,
2953 const uint16_t *QOpcodes1) {
2954 assert(Subtarget->hasNEON());
2955 assert(NumVecs >= 1 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
2956 SDLoc dl(N);
2957
2958 SDValue MemAddr, Align;
2959 unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
2960 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2961 return;
2962
2963 SDValue Chain = N->getOperand(0);
2964 EVT VT = N->getValueType(0);
2965 bool is64BitVector = VT.is64BitVector();
2966
2967 unsigned Alignment = 0;
2968 if (NumVecs != 3) {
2969 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2970 unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
2971 if (Alignment > NumBytes)
2972 Alignment = NumBytes;
2973 if (Alignment < 8 && Alignment < NumBytes)
2974 Alignment = 0;
2975 // Alignment must be a power of two; make sure of that.
2976 Alignment = (Alignment & -Alignment);
2977 if (Alignment == 1)
2978 Alignment = 0;
2979 }
2980 Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
2981
2982 unsigned OpcodeIndex;
2983 switch (VT.getSimpleVT().SimpleTy) {
2984 default: llvm_unreachable("unhandled vld-dup type");
2985 case MVT::v8i8:
2986 case MVT::v16i8: OpcodeIndex = 0; break;
2987 case MVT::v4i16:
2988 case MVT::v8i16:
2989 case MVT::v4f16:
2990 case MVT::v8f16:
2991 case MVT::v4bf16:
2992 case MVT::v8bf16:
2993 OpcodeIndex = 1; break;
2994 case MVT::v2f32:
2995 case MVT::v2i32:
2996 case MVT::v4f32:
2997 case MVT::v4i32: OpcodeIndex = 2; break;
2998 case MVT::v1f64:
2999 case MVT::v1i64: OpcodeIndex = 3; break;
3000 }
3001
3002 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
3003 if (!is64BitVector)
3004 ResTyElts *= 2;
3005 EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
3006
3007 std::vector<EVT> ResTys;
3008 ResTys.push_back(ResTy);
3009 if (isUpdating)
3010 ResTys.push_back(MVT::i32);
3011 ResTys.push_back(MVT::Other);
3012
3013 SDValue Pred = getAL(CurDAG, dl);
3014 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3015
3017 Ops.push_back(MemAddr);
3018 Ops.push_back(Align);
3019 unsigned Opc = is64BitVector ? DOpcodes[OpcodeIndex]
3020 : (NumVecs == 1) ? QOpcodes0[OpcodeIndex]
3021 : QOpcodes1[OpcodeIndex];
3022 if (isUpdating) {
3023 SDValue Inc = N->getOperand(2);
3024 bool IsImmUpdate =
3025 isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs);
3026 if (IsImmUpdate) {
3027 if (!isVLDfixed(Opc))
3028 Ops.push_back(Reg0);
3029 } else {
3030 if (isVLDfixed(Opc))
3032 Ops.push_back(Inc);
3033 }
3034 }
3035 if (is64BitVector || NumVecs == 1) {
3036 // Double registers and VLD1 quad registers are directly supported.
3037 } else if (NumVecs == 2) {
3038 const SDValue OpsA[] = {MemAddr, Align, Pred, Reg0, Chain};
3039 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, ResTy,
3040 MVT::Other, OpsA);
3041 Chain = SDValue(VLdA, 1);
3042 } else {
3043 SDValue ImplDef = SDValue(
3044 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
3045 const SDValue OpsA[] = {MemAddr, Align, ImplDef, Pred, Reg0, Chain};
3046 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, ResTy,
3047 MVT::Other, OpsA);
3048 Ops.push_back(SDValue(VLdA, 0));
3049 Chain = SDValue(VLdA, 1);
3050 }
3051
3052 Ops.push_back(Pred);
3053 Ops.push_back(Reg0);
3054 Ops.push_back(Chain);
3055
3056 SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
3057
3058 // Transfer memoperands.
3059 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3060 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLdDup), {MemOp});
3061
3062 // Extract the subregisters.
3063 if (NumVecs == 1) {
3064 ReplaceUses(SDValue(N, 0), SDValue(VLdDup, 0));
3065 } else {
3066 SDValue SuperReg = SDValue(VLdDup, 0);
3067 static_assert(ARM::dsub_7 == ARM::dsub_0 + 7, "Unexpected subreg numbering");
3068 unsigned SubIdx = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
3069 for (unsigned Vec = 0; Vec != NumVecs; ++Vec) {
3070 ReplaceUses(SDValue(N, Vec),
3071 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
3072 }
3073 }
3074 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
3075 if (isUpdating)
3076 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
3077 CurDAG->RemoveDeadNode(N);
3078}
3079
3080bool ARMDAGToDAGISel::tryInsertVectorElt(SDNode *N) {
3081 if (!Subtarget->hasMVEIntegerOps())
3082 return false;
3083
3084 SDLoc dl(N);
3085
3086 // We are trying to use VMOV/VMOVX/VINS to more efficiently lower insert and
3087 // extracts of v8f16 and v8i16 vectors. Check that we have two adjacent
3088 // inserts of the correct type:
3089 SDValue Ins1 = SDValue(N, 0);
3090 SDValue Ins2 = N->getOperand(0);
3091 EVT VT = Ins1.getValueType();
3092 if (Ins2.getOpcode() != ISD::INSERT_VECTOR_ELT || !Ins2.hasOneUse() ||
3093 !isa<ConstantSDNode>(Ins1.getOperand(2)) ||
3094 !isa<ConstantSDNode>(Ins2.getOperand(2)) ||
3095 (VT != MVT::v8f16 && VT != MVT::v8i16) || (Ins2.getValueType() != VT))
3096 return false;
3097
3098 unsigned Lane1 = Ins1.getConstantOperandVal(2);
3099 unsigned Lane2 = Ins2.getConstantOperandVal(2);
3100 if (Lane2 % 2 != 0 || Lane1 != Lane2 + 1)
3101 return false;
3102
3103 // If the inserted values will be able to use T/B already, leave it to the
3104 // existing tablegen patterns. For example VCVTT/VCVTB.
3105 SDValue Val1 = Ins1.getOperand(1);
3106 SDValue Val2 = Ins2.getOperand(1);
3107 if (Val1.getOpcode() == ISD::FP_ROUND || Val2.getOpcode() == ISD::FP_ROUND)
3108 return false;
3109
3110 // Check if the inserted values are both extracts.
3111 if ((Val1.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
3112 Val1.getOpcode() == ARMISD::VGETLANEu) &&
3114 Val2.getOpcode() == ARMISD::VGETLANEu) &&
3115 isa<ConstantSDNode>(Val1.getOperand(1)) &&
3116 isa<ConstantSDNode>(Val2.getOperand(1)) &&
3117 (Val1.getOperand(0).getValueType() == MVT::v8f16 ||
3118 Val1.getOperand(0).getValueType() == MVT::v8i16) &&
3119 (Val2.getOperand(0).getValueType() == MVT::v8f16 ||
3120 Val2.getOperand(0).getValueType() == MVT::v8i16)) {
3121 unsigned ExtractLane1 = Val1.getConstantOperandVal(1);
3122 unsigned ExtractLane2 = Val2.getConstantOperandVal(1);
3123
3124 // If the two extracted lanes are from the same place and adjacent, this
3125 // simplifies into a f32 lane move.
3126 if (Val1.getOperand(0) == Val2.getOperand(0) && ExtractLane2 % 2 == 0 &&
3127 ExtractLane1 == ExtractLane2 + 1) {
3128 SDValue NewExt = CurDAG->getTargetExtractSubreg(
3129 ARM::ssub_0 + ExtractLane2 / 2, dl, MVT::f32, Val1.getOperand(0));
3130 SDValue NewIns = CurDAG->getTargetInsertSubreg(
3131 ARM::ssub_0 + Lane2 / 2, dl, VT, Ins2.getOperand(0),
3132 NewExt);
3133 ReplaceUses(Ins1, NewIns);
3134 return true;
3135 }
3136
3137 // Else v8i16 pattern of an extract and an insert, with a optional vmovx for
3138 // extracting odd lanes.
3139 if (VT == MVT::v8i16 && Subtarget->hasFullFP16()) {
3140 SDValue Inp1 = CurDAG->getTargetExtractSubreg(
3141 ARM::ssub_0 + ExtractLane1 / 2, dl, MVT::f32, Val1.getOperand(0));
3142 SDValue Inp2 = CurDAG->getTargetExtractSubreg(
3143 ARM::ssub_0 + ExtractLane2 / 2, dl, MVT::f32, Val2.getOperand(0));
3144 if (ExtractLane1 % 2 != 0)
3145 Inp1 = SDValue(CurDAG->getMachineNode(ARM::VMOVH, dl, MVT::f32, Inp1), 0);
3146 if (ExtractLane2 % 2 != 0)
3147 Inp2 = SDValue(CurDAG->getMachineNode(ARM::VMOVH, dl, MVT::f32, Inp2), 0);
3148 SDNode *VINS = CurDAG->getMachineNode(ARM::VINSH, dl, MVT::f32, Inp2, Inp1);
3149 SDValue NewIns =
3150 CurDAG->getTargetInsertSubreg(ARM::ssub_0 + Lane2 / 2, dl, MVT::v4f32,
3151 Ins2.getOperand(0), SDValue(VINS, 0));
3152 ReplaceUses(Ins1, NewIns);
3153 return true;
3154 }
3155 }
3156
3157 // The inserted values are not extracted - if they are f16 then insert them
3158 // directly using a VINS.
3159 if (VT == MVT::v8f16 && Subtarget->hasFullFP16()) {
3160 SDNode *VINS = CurDAG->getMachineNode(ARM::VINSH, dl, MVT::f32, Val2, Val1);
3161 SDValue NewIns =
3162 CurDAG->getTargetInsertSubreg(ARM::ssub_0 + Lane2 / 2, dl, MVT::v4f32,
3163 Ins2.getOperand(0), SDValue(VINS, 0));
3164 ReplaceUses(Ins1, NewIns);
3165 return true;
3166 }
3167
3168 return false;
3169}
3170
3171bool ARMDAGToDAGISel::transformFixedFloatingPointConversion(SDNode *N,
3172 SDNode *FMul,
3173 bool IsUnsigned,
3174 bool FixedToFloat) {
3175 auto Type = N->getValueType(0);
3176 unsigned ScalarBits = Type.getScalarSizeInBits();
3177 if (ScalarBits > 32)
3178 return false;
3179
3180 SDNodeFlags FMulFlags = FMul->getFlags();
3181 // The fixed-point vcvt and vcvt+vmul are not always equivalent if inf is
3182 // allowed in 16 bit unsigned floats
3183 if (ScalarBits == 16 && !FMulFlags.hasNoInfs() && IsUnsigned)
3184 return false;
3185
3186 SDValue ImmNode = FMul->getOperand(1);
3187 SDValue VecVal = FMul->getOperand(0);
3188 if (VecVal->getOpcode() == ISD::UINT_TO_FP ||
3189 VecVal->getOpcode() == ISD::SINT_TO_FP)
3190 VecVal = VecVal->getOperand(0);
3191
3192 if (VecVal.getValueType().getScalarSizeInBits() != ScalarBits)
3193 return false;
3194
3195 if (ImmNode.getOpcode() == ISD::BITCAST) {
3196 if (ImmNode.getValueType().getScalarSizeInBits() != ScalarBits)
3197 return false;
3198 ImmNode = ImmNode.getOperand(0);
3199 }
3200
3201 if (ImmNode.getValueType().getScalarSizeInBits() != ScalarBits)
3202 return false;
3203
3204 APFloat ImmAPF(0.0f);
3205 switch (ImmNode.getOpcode()) {
3206 case ARMISD::VMOVIMM:
3207 case ARMISD::VDUP: {
3208 if (!isa<ConstantSDNode>(ImmNode.getOperand(0)))
3209 return false;
3210 unsigned Imm = ImmNode.getConstantOperandVal(0);
3211 if (ImmNode.getOpcode() == ARMISD::VMOVIMM)
3212 Imm = ARM_AM::decodeVMOVModImm(Imm, ScalarBits);
3213 ImmAPF =
3214 APFloat(ScalarBits == 32 ? APFloat::IEEEsingle() : APFloat::IEEEhalf(),
3215 APInt(ScalarBits, Imm));
3216 break;
3217 }
3218 case ARMISD::VMOVFPIMM: {
3220 break;
3221 }
3222 default:
3223 return false;
3224 }
3225
3226 // Where n is the number of fractional bits, multiplying by 2^n will convert
3227 // from float to fixed and multiplying by 2^-n will convert from fixed to
3228 // float. Taking log2 of the factor (after taking the inverse in the case of
3229 // float to fixed) will give n.
3230 APFloat ToConvert = ImmAPF;
3231 if (FixedToFloat) {
3232 if (!ImmAPF.getExactInverse(&ToConvert))
3233 return false;
3234 }
3235 APSInt Converted(64, false);
3236 bool IsExact;
3238 &IsExact);
3239 if (!IsExact || !Converted.isPowerOf2())
3240 return false;
3241
3242 unsigned FracBits = Converted.logBase2();
3243 if (FracBits > ScalarBits)
3244 return false;
3245
3247 VecVal, CurDAG->getConstant(FracBits, SDLoc(N), MVT::i32)};
3248 AddEmptyMVEPredicateToOps(Ops, SDLoc(N), Type);
3249
3250 unsigned int Opcode;
3251 switch (ScalarBits) {
3252 case 16:
3253 if (FixedToFloat)
3254 Opcode = IsUnsigned ? ARM::MVE_VCVTf16u16_fix : ARM::MVE_VCVTf16s16_fix;
3255 else
3256 Opcode = IsUnsigned ? ARM::MVE_VCVTu16f16_fix : ARM::MVE_VCVTs16f16_fix;
3257 break;
3258 case 32:
3259 if (FixedToFloat)
3260 Opcode = IsUnsigned ? ARM::MVE_VCVTf32u32_fix : ARM::MVE_VCVTf32s32_fix;
3261 else
3262 Opcode = IsUnsigned ? ARM::MVE_VCVTu32f32_fix : ARM::MVE_VCVTs32f32_fix;
3263 break;
3264 default:
3265 llvm_unreachable("unexpected number of scalar bits");
3266 break;
3267 }
3268
3269 ReplaceNode(N, CurDAG->getMachineNode(Opcode, SDLoc(N), Type, Ops));
3270 return true;
3271}
3272
3273bool ARMDAGToDAGISel::tryFP_TO_INT(SDNode *N, SDLoc dl) {
3274 // Transform a floating-point to fixed-point conversion to a VCVT
3275 if (!Subtarget->hasMVEFloatOps())
3276 return false;
3277 EVT Type = N->getValueType(0);
3278 if (!Type.isVector())
3279 return false;
3280 unsigned int ScalarBits = Type.getScalarSizeInBits();
3281
3282 bool IsUnsigned = N->getOpcode() == ISD::FP_TO_UINT ||
3283 N->getOpcode() == ISD::FP_TO_UINT_SAT;
3284 SDNode *Node = N->getOperand(0).getNode();
3285
3286 // floating-point to fixed-point with one fractional bit gets turned into an
3287 // FP_TO_[U|S]INT(FADD (x, x)) rather than an FP_TO_[U|S]INT(FMUL (x, y))
3288 if (Node->getOpcode() == ISD::FADD) {
3289 if (Node->getOperand(0) != Node->getOperand(1))
3290 return false;
3291 SDNodeFlags Flags = Node->getFlags();
3292 // The fixed-point vcvt and vcvt+vmul are not always equivalent if inf is
3293 // allowed in 16 bit unsigned floats
3294 if (ScalarBits == 16 && !Flags.hasNoInfs() && IsUnsigned)
3295 return false;
3296
3297 unsigned Opcode;
3298 switch (ScalarBits) {
3299 case 16:
3300 Opcode = IsUnsigned ? ARM::MVE_VCVTu16f16_fix : ARM::MVE_VCVTs16f16_fix;
3301 break;
3302 case 32:
3303 Opcode = IsUnsigned ? ARM::MVE_VCVTu32f32_fix : ARM::MVE_VCVTs32f32_fix;
3304 break;
3305 }
3306 SmallVector<SDValue, 3> Ops{Node->getOperand(0),
3307 CurDAG->getConstant(1, dl, MVT::i32)};
3308 AddEmptyMVEPredicateToOps(Ops, dl, Type);
3309
3310 ReplaceNode(N, CurDAG->getMachineNode(Opcode, dl, Type, Ops));
3311 return true;
3312 }
3313
3314 if (Node->getOpcode() != ISD::FMUL)
3315 return false;
3316
3317 return transformFixedFloatingPointConversion(N, Node, IsUnsigned, false);
3318}
3319
3320bool ARMDAGToDAGISel::tryFMULFixed(SDNode *N, SDLoc dl) {
3321 // Transform a fixed-point to floating-point conversion to a VCVT
3322 if (!Subtarget->hasMVEFloatOps())
3323 return false;
3324 auto Type = N->getValueType(0);
3325 if (!Type.isVector())
3326 return false;
3327
3328 auto LHS = N->getOperand(0);
3329 if (LHS.getOpcode() != ISD::SINT_TO_FP && LHS.getOpcode() != ISD::UINT_TO_FP)
3330 return false;
3331
3332 return transformFixedFloatingPointConversion(
3333 N, N, LHS.getOpcode() == ISD::UINT_TO_FP, true);
3334}
3335
3336bool ARMDAGToDAGISel::tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned) {
3337 if (!Subtarget->hasV6T2Ops())
3338 return false;
3339
3340 unsigned Opc = isSigned
3341 ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
3342 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
3343 SDLoc dl(N);
3344
3345 // For unsigned extracts, check for a shift right and mask
3346 unsigned And_imm = 0;
3347 if (N->getOpcode() == ISD::AND) {
3348 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
3349
3350 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
3351 if (And_imm & (And_imm + 1))
3352 return false;
3353
3354 unsigned Srl_imm = 0;
3355 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
3356 Srl_imm)) {
3357 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
3358
3359 // Mask off the unnecessary bits of the AND immediate; normally
3360 // DAGCombine will do this, but that might not happen if
3361 // targetShrinkDemandedConstant chooses a different immediate.
3362 And_imm &= -1U >> Srl_imm;
3363
3364 // Note: The width operand is encoded as width-1.
3365 unsigned Width = llvm::countr_one(And_imm) - 1;
3366 unsigned LSB = Srl_imm;
3367
3368 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3369
3370 if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
3371 // It's cheaper to use a right shift to extract the top bits.
3372 if (Subtarget->isThumb()) {
3373 Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
3374 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3375 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3376 getAL(CurDAG, dl), Reg0, Reg0 };
3377 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3378 return true;
3379 }
3380
3381 // ARM models shift instructions as MOVsi with shifter operand.
3383 SDValue ShOpc =
3384 CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB), dl,
3385 MVT::i32);
3386 SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
3387 getAL(CurDAG, dl), Reg0, Reg0 };
3388 CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
3389 return true;
3390 }
3391
3392 assert(LSB + Width + 1 <= 32 && "Shouldn't create an invalid ubfx");
3393 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3394 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3395 CurDAG->getTargetConstant(Width, dl, MVT::i32),
3396 getAL(CurDAG, dl), Reg0 };
3397 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3398 return true;
3399 }
3400 }
3401 return false;
3402 }
3403
3404 // Otherwise, we're looking for a shift of a shift
3405 unsigned Shl_imm = 0;
3406 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
3407 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
3408 unsigned Srl_imm = 0;
3409 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
3410 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
3411 // Note: The width operand is encoded as width-1.
3412 unsigned Width = 32 - Srl_imm - 1;
3413 int LSB = Srl_imm - Shl_imm;
3414 if (LSB < 0)
3415 return false;
3416 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3417 assert(LSB + Width + 1 <= 32 && "Shouldn't create an invalid ubfx");
3418 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3419 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3420 CurDAG->getTargetConstant(Width, dl, MVT::i32),
3421 getAL(CurDAG, dl), Reg0 };
3422 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3423 return true;
3424 }
3425 }
3426
3427 // Or we are looking for a shift of an and, with a mask operand
3428 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_imm) &&
3429 isShiftedMask_32(And_imm)) {
3430 unsigned Srl_imm = 0;
3431 unsigned LSB = llvm::countr_zero(And_imm);
3432 // Shift must be the same as the ands lsb
3433 if (isInt32Immediate(N->getOperand(1), Srl_imm) && Srl_imm == LSB) {
3434 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
3435 unsigned MSB = llvm::Log2_32(And_imm);
3436 // Note: The width operand is encoded as width-1.
3437 unsigned Width = MSB - LSB;
3438 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3439 assert(Srl_imm + Width + 1 <= 32 && "Shouldn't create an invalid ubfx");
3440 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3441 CurDAG->getTargetConstant(Srl_imm, dl, MVT::i32),
3442 CurDAG->getTargetConstant(Width, dl, MVT::i32),
3443 getAL(CurDAG, dl), Reg0 };
3444 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3445 return true;
3446 }
3447 }
3448
3449 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG) {
3450 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
3451 unsigned LSB = 0;
3452 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL, LSB) &&
3453 !isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRA, LSB))
3454 return false;
3455
3456 if (LSB + Width > 32)
3457 return false;
3458
3459 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3460 assert(LSB + Width <= 32 && "Shouldn't create an invalid ubfx");
3461 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3462 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3463 CurDAG->getTargetConstant(Width - 1, dl, MVT::i32),
3464 getAL(CurDAG, dl), Reg0 };
3465 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3466 return true;
3467 }
3468
3469 return false;
3470}
3471
3472/// Target-specific DAG combining for ISD::SUB.
3473/// Target-independent combining lowers SELECT_CC nodes of the form
3474/// select_cc setg[ge] X, 0, X, -X
3475/// select_cc setgt X, -1, X, -X
3476/// select_cc setl[te] X, 0, -X, X
3477/// select_cc setlt X, 1, -X, X
3478/// which represent Integer ABS into:
3479/// Y = sra (X, size(X)-1); sub (xor (X, Y), Y)
3480/// ARM instruction selection detects the latter and matches it to
3481/// ARM::ABS or ARM::t2ABS machine node.
3482bool ARMDAGToDAGISel::tryABSOp(SDNode *N){
3483 SDValue SUBSrc0 = N->getOperand(0);
3484 SDValue SUBSrc1 = N->getOperand(1);
3485 EVT VT = N->getValueType(0);
3486
3487 if (Subtarget->isThumb1Only())
3488 return false;
3489
3490 if (SUBSrc0.getOpcode() != ISD::XOR || SUBSrc1.getOpcode() != ISD::SRA)
3491 return false;
3492
3493 SDValue XORSrc0 = SUBSrc0.getOperand(0);
3494 SDValue XORSrc1 = SUBSrc0.getOperand(1);
3495 SDValue SRASrc0 = SUBSrc1.getOperand(0);
3496 SDValue SRASrc1 = SUBSrc1.getOperand(1);
3497 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
3498 EVT XType = SRASrc0.getValueType();
3499 unsigned Size = XType.getSizeInBits() - 1;
3500
3501 if (XORSrc1 == SUBSrc1 && XORSrc0 == SRASrc0 && XType.isInteger() &&
3502 SRAConstant != nullptr && Size == SRAConstant->getZExtValue()) {
3503 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
3504 CurDAG->SelectNodeTo(N, Opcode, VT, XORSrc0);
3505 return true;
3506 }
3507
3508 return false;
3509}
3510
3511/// We've got special pseudo-instructions for these
3512void ARMDAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
3513 unsigned Opcode;
3514 EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
3515 if (MemTy == MVT::i8)
3516 Opcode = Subtarget->isThumb() ? ARM::tCMP_SWAP_8 : ARM::CMP_SWAP_8;
3517 else if (MemTy == MVT::i16)
3518 Opcode = Subtarget->isThumb() ? ARM::tCMP_SWAP_16 : ARM::CMP_SWAP_16;
3519 else if (MemTy == MVT::i32)
3520 Opcode = Subtarget->isThumb() ? ARM::tCMP_SWAP_32 : ARM::CMP_SWAP_32;
3521 else
3522 llvm_unreachable("Unknown AtomicCmpSwap type");
3523
3524 SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
3525 N->getOperand(0)};
3526 SDNode *CmpSwap = CurDAG->getMachineNode(
3527 Opcode, SDLoc(N),
3528 CurDAG->getVTList(MVT::i32, MVT::i32, MVT::Other), Ops);
3529
3530 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
3531 CurDAG->setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
3532
3533 ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
3534 ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
3535 CurDAG->RemoveDeadNode(N);
3536}
3537
3538static std::optional<std::pair<unsigned, unsigned>>
3540 unsigned FirstOne = A.getBitWidth() - A.countl_zero() - 1;
3541 unsigned LastOne = A.countr_zero();
3542 if (A.popcount() != (FirstOne - LastOne + 1))
3543 return std::nullopt;
3544 return std::make_pair(FirstOne, LastOne);
3545}
3546
// Try to shrink (cmpz (and X, C), #0) into a shift-based test when C's set
// bits form one contiguous run. On success the AND node is replaced with a
// flag-setting shift (or shift pair); the CMPZ itself is left for normal
// selection. \p SwitchEQNEToPLMI is set to true when the caller must remap
// EQ/NE to PL/MI (single-bit case: the tested bit is moved into the sign
// bit, so the N flag carries the result).
void ARMDAGToDAGISel::SelectCMPZ(SDNode *N, bool &SwitchEQNEToPLMI) {
  assert(N->getOpcode() == ARMISD::CMPZ);
  SwitchEQNEToPLMI = false;

  if (!Subtarget->isThumb())
    // FIXME: Work out whether it is profitable to do this in A32 mode - LSL and
    // LSR don't exist as standalone instructions - they need the barrel shifter.
    return;

  // select (cmpz (and X, C), #0) -> (LSLS X) or (LSRS X) or (LSRS (LSLS X))
  SDValue And = N->getOperand(0);
  // Only fold if this CMPZ is the AND's sole consumer; otherwise the AND's
  // value result is still needed elsewhere.
  if (!And->hasOneUse())
    return;

  SDValue Zero = N->getOperand(1);
  if (!isNullConstant(Zero) || And->getOpcode() != ISD::AND)
    return;
  SDValue X = And.getOperand(0);
  auto C = dyn_cast<ConstantSDNode>(And.getOperand(1));

  if (!C)
    return;
  // Range->first = MSB index of the mask, Range->second = LSB index.
  auto Range = getContiguousRangeOfSetBits(C->getAPIntValue());
  if (!Range)
    return;

  // There are several ways to lower this:
  SDNode *NewN;
  SDLoc dl(N);

  // Emit a flag-setting left/right shift by Imm. In Thumb2 the tLSL/tLSR
  // opcode is remapped to the t2 variant and cc_out operands are appended;
  // in Thumb1 the implicit CPSR def comes first. Operand order is dictated
  // by the instruction definitions - do not reorder.
  auto EmitShift = [&](unsigned Opc, SDValue Src, unsigned Imm) -> SDNode* {
    if (Subtarget->isThumb2()) {
      Opc = (Opc == ARM::tLSLri) ? ARM::t2LSLri : ARM::t2LSRri;
      SDValue Ops[] = { Src, CurDAG->getTargetConstant(Imm, dl, MVT::i32),
                        getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
    } else {
      SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32), Src,
                       CurDAG->getTargetConstant(Imm, dl, MVT::i32),
                       getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32)};
      return CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
    }
  };

  if (Range->second == 0) {
    // 1. Mask includes the LSB -> Simply shift the top N bits off
    NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
    ReplaceNode(And.getNode(), NewN);
  } else if (Range->first == 31) {
    // 2. Mask includes the MSB -> Simply shift the bottom N bits off
    NewN = EmitShift(ARM::tLSRri, X, Range->second);
    ReplaceNode(And.getNode(), NewN);
  } else if (Range->first == Range->second) {
    // 3. Only one bit is set. We can shift this into the sign bit and use a
    // PL/MI comparison.
    NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
    ReplaceNode(And.getNode(), NewN);

    SwitchEQNEToPLMI = true;
  } else if (!Subtarget->hasV6T2Ops()) {
    // 4. Do a double shift to clear bottom and top bits, but only in
    // thumb-1 mode as in thumb-2 we can use UBFX.
    NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
    NewN = EmitShift(ARM::tLSRri, SDValue(NewN, 0),
                     Range->second + (31 - Range->first));
    ReplaceNode(And.getNode(), NewN);
  }
}
3616
3617static unsigned getVectorShuffleOpcode(EVT VT, unsigned Opc64[3],
3618 unsigned Opc128[3]) {
3619 assert((VT.is64BitVector() || VT.is128BitVector()) &&
3620 "Unexpected vector shuffle length");
3621 switch (VT.getScalarSizeInBits()) {
3622 default:
3623 llvm_unreachable("Unexpected vector shuffle element size");
3624 case 8:
3625 return VT.is64BitVector() ? Opc64[0] : Opc128[0];
3626 case 16:
3627 return VT.is64BitVector() ? Opc64[1] : Opc128[1];
3628 case 32:
3629 return VT.is64BitVector() ? Opc64[2] : Opc128[2];
3630 }
3631}
3632
3633void ARMDAGToDAGISel::Select(SDNode *N) {
3634 SDLoc dl(N);
3635
3636 if (N->isMachineOpcode()) {
3637 N->setNodeId(-1);
3638 return; // Already selected.
3639 }
3640
3641 switch (N->getOpcode()) {
3642 default: break;
3643 case ISD::STORE: {
3644 // For Thumb1, match an sp-relative store in C++. This is a little
3645 // unfortunate, but I don't think I can make the chain check work
3646 // otherwise. (The chain of the store has to be the same as the chain
3647 // of the CopyFromReg, or else we can't replace the CopyFromReg with
3648 // a direct reference to "SP".)
3649 //
3650 // This is only necessary on Thumb1 because Thumb1 sp-relative stores use
3651 // a different addressing mode from other four-byte stores.
3652 //
3653 // This pattern usually comes up with call arguments.
3654 StoreSDNode *ST = cast<StoreSDNode>(N);
3655 SDValue Ptr = ST->getBasePtr();
3656 if (Subtarget->isThumb1Only() && ST->isUnindexed()) {
3657 int RHSC = 0;
3658 if (Ptr.getOpcode() == ISD::ADD &&
3659 isScaledConstantInRange(Ptr.getOperand(1), /*Scale=*/4, 0, 256, RHSC))
3660 Ptr = Ptr.getOperand(0);
3661
3662 if (Ptr.getOpcode() == ISD::CopyFromReg &&
3663 cast<RegisterSDNode>(Ptr.getOperand(1))->getReg() == ARM::SP &&
3664 Ptr.getOperand(0) == ST->getChain()) {
3665 SDValue Ops[] = {ST->getValue(),
3666 CurDAG->getRegister(ARM::SP, MVT::i32),
3667 CurDAG->getTargetConstant(RHSC, dl, MVT::i32),
3668 getAL(CurDAG, dl),
3669 CurDAG->getRegister(0, MVT::i32),
3670 ST->getChain()};
3671 MachineSDNode *ResNode =
3672 CurDAG->getMachineNode(ARM::tSTRspi, dl, MVT::Other, Ops);
3673 MachineMemOperand *MemOp = ST->getMemOperand();
3674 CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {MemOp});
3675 ReplaceNode(N, ResNode);
3676 return;
3677 }
3678 }
3679 break;
3680 }
3682 if (tryWriteRegister(N))
3683 return;
3684 break;
3685 case ISD::READ_REGISTER:
3686 if (tryReadRegister(N))
3687 return;
3688 break;
3689 case ISD::INLINEASM:
3690 case ISD::INLINEASM_BR:
3691 if (tryInlineAsm(N))
3692 return;
3693 break;
3694 case ISD::SUB:
3695 // Select special operations if SUB node forms integer ABS pattern
3696 if (tryABSOp(N))
3697 return;
3698 // Other cases are autogenerated.
3699 break;
3700 case ISD::Constant: {
3701 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
3702 // If we can't materialize the constant we need to use a literal pool
3703 if (ConstantMaterializationCost(Val, Subtarget) > 2 &&
3704 !Subtarget->genExecuteOnly()) {
3705 SDValue CPIdx = CurDAG->getTargetConstantPool(
3706 ConstantInt::get(Type::getInt32Ty(*CurDAG->getContext()), Val),
3707 TLI->getPointerTy(CurDAG->getDataLayout()));
3708
3709 SDNode *ResNode;
3710 if (Subtarget->isThumb()) {
3711 SDValue Ops[] = {
3712 CPIdx,
3713 getAL(CurDAG, dl),
3714 CurDAG->getRegister(0, MVT::i32),
3715 CurDAG->getEntryNode()
3716 };
3717 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
3718 Ops);
3719 } else {
3720 SDValue Ops[] = {
3721 CPIdx,
3722 CurDAG->getTargetConstant(0, dl, MVT::i32),
3723 getAL(CurDAG, dl),
3724 CurDAG->getRegister(0, MVT::i32),
3725 CurDAG->getEntryNode()
3726 };
3727 ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
3728 Ops);
3729 }
3730 // Annotate the Node with memory operand information so that MachineInstr
3731 // queries work properly. This e.g. gives the register allocation the
3732 // required information for rematerialization.
3733 MachineFunction& MF = CurDAG->getMachineFunction();
3737
3738 CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {MemOp});
3739
3740 ReplaceNode(N, ResNode);
3741 return;
3742 }
3743
3744 // Other cases are autogenerated.
3745 break;
3746 }
3747 case ISD::FrameIndex: {
3748 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
3749 int FI = cast<FrameIndexSDNode>(N)->getIndex();
3750 SDValue TFI = CurDAG->getTargetFrameIndex(
3751 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
3752 if (Subtarget->isThumb1Only()) {
3753 // Set the alignment of the frame object to 4, to avoid having to generate
3754 // more than one ADD
3755 MachineFrameInfo &MFI = MF->getFrameInfo();
3756 if (MFI.getObjectAlign(FI) < Align(4))
3757 MFI.setObjectAlignment(FI, Align(4));
3758 CurDAG->SelectNodeTo(N, ARM::tADDframe, MVT::i32, TFI,
3759 CurDAG->getTargetConstant(0, dl, MVT::i32));
3760 return;
3761 } else {
3762 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
3763 ARM::t2ADDri : ARM::ADDri);
3764 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, dl, MVT::i32),
3765 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
3766 CurDAG->getRegister(0, MVT::i32) };
3767 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3768 return;
3769 }
3770 }
3772 if (tryInsertVectorElt(N))
3773 return;
3774 break;
3775 }
3776 case ISD::SRL:
3777 if (tryV6T2BitfieldExtractOp(N, false))
3778 return;
3779 break;
3781 case ISD::SRA:
3782 if (tryV6T2BitfieldExtractOp(N, true))
3783 return;
3784 break;
3785 case ISD::FP_TO_UINT:
3786 case ISD::FP_TO_SINT:
3789 if (tryFP_TO_INT(N, dl))
3790 return;
3791 break;
3792 case ISD::FMUL:
3793 if (tryFMULFixed(N, dl))
3794 return;
3795 break;
3796 case ISD::MUL:
3797 if (Subtarget->isThumb1Only())
3798 break;
3799 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
3800 unsigned RHSV = C->getZExtValue();
3801 if (!RHSV) break;
3802 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
3803 unsigned ShImm = Log2_32(RHSV-1);
3804 if (ShImm >= 32)
3805 break;
3806 SDValue V = N->getOperand(0);
3807 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
3808 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
3809 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3810 if (Subtarget->isThumb()) {
3811 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
3812 CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
3813 return;
3814 } else {
3815 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
3816 Reg0 };
3817 CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
3818 return;
3819 }
3820 }
3821 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
3822 unsigned ShImm = Log2_32(RHSV+1);
3823 if (ShImm >= 32)
3824 break;
3825 SDValue V = N->getOperand(0);
3826 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
3827 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
3828 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3829 if (Subtarget->isThumb()) {
3830 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
3831 CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
3832 return;
3833 } else {
3834 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
3835 Reg0 };
3836 CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
3837 return;
3838 }
3839 }
3840 }
3841 break;
3842 case ISD::AND: {
3843 // Check for unsigned bitfield extract
3844 if (tryV6T2BitfieldExtractOp(N, false))
3845 return;
3846
3847 // If an immediate is used in an AND node, it is possible that the immediate
3848 // can be more optimally materialized when negated. If this is the case we
3849 // can negate the immediate and use a BIC instead.
3850 auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
3851 if (N1C && N1C->hasOneUse() && Subtarget->isThumb()) {
3852 uint32_t Imm = (uint32_t) N1C->getZExtValue();
3853
3854 // In Thumb2 mode, an AND can take a 12-bit immediate. If this
3855 // immediate can be negated and fit in the immediate operand of
3856 // a t2BIC, don't do any manual transform here as this can be
3857 // handled by the generic ISel machinery.
3858 bool PreferImmediateEncoding =
3859 Subtarget->hasThumb2() && (is_t2_so_imm(Imm) || is_t2_so_imm_not(Imm));
3860 if (!PreferImmediateEncoding &&
3861 ConstantMaterializationCost(Imm, Subtarget) >
3862 ConstantMaterializationCost(~Imm, Subtarget)) {
3863 // The current immediate costs more to materialize than a negated
3864 // immediate, so negate the immediate and use a BIC.
3865 SDValue NewImm =
3866 CurDAG->getConstant(~N1C->getZExtValue(), dl, MVT::i32);
3867 // If the new constant didn't exist before, reposition it in the topological
3868 // ordering so it is just before N. Otherwise, don't touch its location.
3869 if (NewImm->getNodeId() == -1)
3870 CurDAG->RepositionNode(N->getIterator(), NewImm.getNode());
3871
3872 if (!Subtarget->hasThumb2()) {
3873 SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32),
3874 N->getOperand(0), NewImm, getAL(CurDAG, dl),
3875 CurDAG->getRegister(0, MVT::i32)};
3876 ReplaceNode(N, CurDAG->getMachineNode(ARM::tBIC, dl, MVT::i32, Ops));
3877 return;
3878 } else {
3879 SDValue Ops[] = {N->getOperand(0), NewImm, getAL(CurDAG, dl),
3880 CurDAG->getRegister(0, MVT::i32),
3881 CurDAG->getRegister(0, MVT::i32)};
3882 ReplaceNode(N,
3883 CurDAG->getMachineNode(ARM::t2BICrr, dl, MVT::i32, Ops));
3884 return;
3885 }
3886 }
3887 }
3888
3889 // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
3890 // of c1 are 0xffff, and lower 16-bit of c2 are 0. That is, the top 16-bits
3891 // are entirely contributed by c2 and lower 16-bits are entirely contributed
3892 // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
3893 // Select it to: "movt x, ((c1 & 0xffff) >> 16)
3894 EVT VT = N->getValueType(0);
3895 if (VT != MVT::i32)
3896 break;
3897 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
3898 ? ARM::t2MOVTi16
3899 : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
3900 if (!Opc)
3901 break;
3902 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
3903 N1C = dyn_cast<ConstantSDNode>(N1);
3904 if (!N1C)
3905 break;
3906 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
3907 SDValue N2 = N0.getOperand(1);
3908 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
3909 if (!N2C)
3910 break;
3911 unsigned N1CVal = N1C->getZExtValue();
3912 unsigned N2CVal = N2C->getZExtValue();
3913 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
3914 (N1CVal & 0xffffU) == 0xffffU &&
3915 (N2CVal & 0xffffU) == 0x0U) {
3916 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
3917 dl, MVT::i32);
3918 SDValue Ops[] = { N0.getOperand(0), Imm16,
3919 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
3920 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
3921 return;
3922 }
3923 }
3924
3925 break;
3926 }
3927 case ARMISD::UMAAL: {
3928 unsigned Opc = Subtarget->isThumb() ? ARM::t2UMAAL : ARM::UMAAL;
3929 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
3930 N->getOperand(2), N->getOperand(3),
3931 getAL(CurDAG, dl),
3932 CurDAG->getRegister(0, MVT::i32) };
3933 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, MVT::i32, MVT::i32, Ops));
3934 return;
3935 }
3936 case ARMISD::UMLAL:{
3937 if (Subtarget->isThumb()) {
3938 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3939 N->getOperand(3), getAL(CurDAG, dl),
3940 CurDAG->getRegister(0, MVT::i32)};
3941 ReplaceNode(
3942 N, CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops));
3943 return;
3944 }else{
3945 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3946 N->getOperand(3), getAL(CurDAG, dl),
3947 CurDAG->getRegister(0, MVT::i32),
3948 CurDAG->getRegister(0, MVT::i32) };
3949 ReplaceNode(N, CurDAG->getMachineNode(
3950 Subtarget->hasV6Ops() ? ARM::UMLAL : ARM::UMLALv5, dl,
3951 MVT::i32, MVT::i32, Ops));
3952 return;
3953 }
3954 }
3955 case ARMISD::SMLAL:{
3956 if (Subtarget->isThumb()) {
3957 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3958 N->getOperand(3), getAL(CurDAG, dl),
3959 CurDAG->getRegister(0, MVT::i32)};
3960 ReplaceNode(
3961 N, CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops));
3962 return;
3963 }else{
3964 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3965 N->getOperand(3), getAL(CurDAG, dl),
3966 CurDAG->getRegister(0, MVT::i32),
3967 CurDAG->getRegister(0, MVT::i32) };
3968 ReplaceNode(N, CurDAG->getMachineNode(
3969 Subtarget->hasV6Ops() ? ARM::SMLAL : ARM::SMLALv5, dl,
3970 MVT::i32, MVT::i32, Ops));
3971 return;
3972 }
3973 }
3974 case ARMISD::SUBE: {
3975 if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
3976 break;
3977 // Look for a pattern to match SMMLS
3978 // (sube a, (smul_loHi a, b), (subc 0, (smul_LOhi(a, b))))
3979 if (N->getOperand(1).getOpcode() != ISD::SMUL_LOHI ||
3980 N->getOperand(2).getOpcode() != ARMISD::SUBC ||
3981 !SDValue(N, 1).use_empty())
3982 break;
3983
3984 if (Subtarget->isThumb())
3985 assert(Subtarget->hasThumb2() &&
3986 "This pattern should not be generated for Thumb");
3987
3988 SDValue SmulLoHi = N->getOperand(1);
3989 SDValue Subc = N->getOperand(2);
3990 SDValue Zero = Subc.getOperand(0);
3991
3992 if (!isNullConstant(Zero) || Subc.getOperand(1) != SmulLoHi.getValue(0) ||
3993 N->getOperand(1) != SmulLoHi.getValue(1) ||
3994 N->getOperand(2) != Subc.getValue(1))
3995 break;
3996
3997 unsigned Opc = Subtarget->isThumb2() ? ARM::t2SMMLS : ARM::SMMLS;
3998 SDValue Ops[] = { SmulLoHi.getOperand(0), SmulLoHi.getOperand(1),
3999 N->getOperand(0), getAL(CurDAG, dl),
4000 CurDAG->getRegister(0, MVT::i32) };
4001 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops));
4002 return;
4003 }
4004 case ISD::LOAD: {
4005 if (Subtarget->hasMVEIntegerOps() && tryMVEIndexedLoad(N))
4006 return;
4007 if (Subtarget->isThumb() && Subtarget->hasThumb2()) {
4008 if (tryT2IndexedLoad(N))
4009 return;
4010 } else if (Subtarget->isThumb()) {
4011 if (tryT1IndexedLoad(N))
4012 return;
4013 } else if (tryARMIndexedLoad(N))
4014 return;
4015 // Other cases are autogenerated.
4016 break;
4017 }
4018 case ISD::MLOAD:
4019 if (Subtarget->hasMVEIntegerOps() && tryMVEIndexedLoad(N))
4020 return;
4021 // Other cases are autogenerated.
4022 break;
4023 case ARMISD::WLSSETUP: {
4024 SDNode *New = CurDAG->getMachineNode(ARM::t2WhileLoopSetup, dl, MVT::i32,
4025 N->getOperand(0));
4026 ReplaceUses(N, New);
4027 CurDAG->RemoveDeadNode(N);
4028 return;
4029 }
4030 case ARMISD::WLS: {
4031 SDNode *New = CurDAG->getMachineNode(ARM::t2WhileLoopStart, dl, MVT::Other,
4032 N->getOperand(1), N->getOperand(2),
4033 N->getOperand(0));
4034 ReplaceUses(N, New);
4035 CurDAG->RemoveDeadNode(N);
4036 return;
4037 }
4038 case ARMISD::LE: {
4039 SDValue Ops[] = { N->getOperand(1),
4040 N->getOperand(2),
4041 N->getOperand(0) };
4042 unsigned Opc = ARM::t2LoopEnd;
4043 SDNode *New = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4044 ReplaceUses(N, New);
4045 CurDAG->RemoveDeadNode(N);
4046 return;
4047 }
4048 case ARMISD::LDRD: {
4049 if (Subtarget->isThumb2())
4050 break; // TableGen handles isel in this case.
4051 SDValue Base, RegOffset, ImmOffset;
4052 const SDValue &Chain = N->getOperand(0);
4053 const SDValue &Addr = N->getOperand(1);
4054 SelectAddrMode3(Addr, Base, RegOffset, ImmOffset);
4055 if (RegOffset != CurDAG->getRegister(0, MVT::i32)) {
4056 // The register-offset variant of LDRD mandates that the register
4057 // allocated to RegOffset is not reused in any of the remaining operands.
4058 // This restriction is currently not enforced. Therefore emitting this
4059 // variant is explicitly avoided.
4060 Base = Addr;
4061 RegOffset = CurDAG->getRegister(0, MVT::i32);
4062 }
4063 SDValue Ops[] = {Base, RegOffset, ImmOffset, Chain};
4064 SDNode *New = CurDAG->getMachineNode(ARM::LOADDUAL, dl,
4065 {MVT::Untyped, MVT::Other}, Ops);
4066 SDValue Lo = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
4067 SDValue(New, 0));
4068 SDValue Hi = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
4069 SDValue(New, 0));
4070 transferMemOperands(N, New);
4071 ReplaceUses(SDValue(N, 0), Lo);
4072 ReplaceUses(SDValue(N, 1), Hi);
4073 ReplaceUses(SDValue(N, 2), SDValue(New, 1));
4074 CurDAG->RemoveDeadNode(N);
4075 return;
4076 }
4077 case ARMISD::STRD: {
4078 if (Subtarget->isThumb2())
4079 break; // TableGen handles isel in this case.
4080 SDValue Base, RegOffset, ImmOffset;
4081 const SDValue &Chain = N->getOperand(0);
4082 const SDValue &Addr = N->getOperand(3);
4083 SelectAddrMode3(Addr, Base, RegOffset, ImmOffset);
4084 if (RegOffset != CurDAG->getRegister(0, MVT::i32)) {
4085 // The register-offset variant of STRD mandates that the register
4086 // allocated to RegOffset is not reused in any of the remaining operands.
4087 // This restriction is currently not enforced. Therefore emitting this
4088 // variant is explicitly avoided.
4089 Base = Addr;
4090 RegOffset = CurDAG->getRegister(0, MVT::i32);
4091 }
4092 SDNode *RegPair =
4093 createGPRPairNode(MVT::Untyped, N->getOperand(1), N->getOperand(2));
4094 SDValue Ops[] = {SDValue(RegPair, 0), Base, RegOffset, ImmOffset, Chain};
4095 SDNode *New = CurDAG->getMachineNode(ARM::STOREDUAL, dl, MVT::Other, Ops);
4096 transferMemOperands(N, New);
4097 ReplaceUses(SDValue(N, 0), SDValue(New, 0));
4098 CurDAG->RemoveDeadNode(N);
4099 return;
4100 }
4101 case ARMISD::LOOP_DEC: {
4102 SDValue Ops[] = { N->getOperand(1),
4103 N->getOperand(2),
4104 N->getOperand(0) };
4105 SDNode *Dec =
4106 CurDAG->getMachineNode(ARM::t2LoopDec, dl,
4107 CurDAG->getVTList(MVT::i32, MVT::Other), Ops);
4108 ReplaceUses(N, Dec);
4109 CurDAG->RemoveDeadNode(N);
4110 return;
4111 }
4112 case ARMISD::BRCOND: {
4113 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
4114 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
4115 // Pattern complexity = 6 cost = 1 size = 0
4116
4117 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
4118 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
4119 // Pattern complexity = 6 cost = 1 size = 0
4120
4121 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
4122 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
4123 // Pattern complexity = 6 cost = 1 size = 0
4124
4125 unsigned Opc = Subtarget->isThumb() ?
4126 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
4127 SDValue Chain = N->getOperand(0);
4128 SDValue N1 = N->getOperand(1);
4129 SDValue N2 = N->getOperand(2);
4130 SDValue N3 = N->getOperand(3);
4131 SDValue InGlue = N->getOperand(4);
4135
4136 unsigned CC = (unsigned) cast<ConstantSDNode>(N2)->getZExtValue();
4137
4138 if (InGlue.getOpcode() == ARMISD::CMPZ) {
4139 if (InGlue.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4140 SDValue Int = InGlue.getOperand(0);
4141 uint64_t ID = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
4142
4143 // Handle low-overhead loops.
4144 if (ID == Intrinsic::loop_decrement_reg) {
4145 SDValue Elements = Int.getOperand(2);
4146 SDValue Size = CurDAG->getTargetConstant(
4147 cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl,
4148 MVT::i32);
4149
4150 SDValue Args[] = { Elements, Size, Int.getOperand(0) };
4151 SDNode *LoopDec =
4152 CurDAG->getMachineNode(ARM::t2LoopDec, dl,
4153 CurDAG->getVTList(MVT::i32, MVT::Other),
4154 Args);
4155 ReplaceUses(Int.getNode(), LoopDec);
4156
4157 SDValue EndArgs[] = { SDValue(LoopDec, 0), N1, Chain };
4158 SDNode *LoopEnd =
4159 CurDAG->getMachineNode(ARM::t2LoopEnd, dl, MVT::Other, EndArgs);
4160
4161 ReplaceUses(N, LoopEnd);
4162 CurDAG->RemoveDeadNode(N);
4163 CurDAG->RemoveDeadNode(InGlue.getNode());
4164 CurDAG->RemoveDeadNode(Int.getNode());
4165 return;
4166 }
4167 }
4168
4169 bool SwitchEQNEToPLMI;
4170 SelectCMPZ(InGlue.getNode(), SwitchEQNEToPLMI);
4171 InGlue = N->getOperand(4);
4172
4173 if (SwitchEQNEToPLMI) {
4174 switch ((ARMCC::CondCodes)CC) {
4175 default: llvm_unreachable("CMPZ must be either NE or EQ!");
4176 case ARMCC::NE:
4178 break;
4179 case ARMCC::EQ:
4181 break;
4182 }
4183 }
4184 }
4185
4186 SDValue Tmp2 = CurDAG->getTargetConstant(CC, dl, MVT::i32);
4187 SDValue Ops[] = { N1, Tmp2, N3, Chain, InGlue };
4188 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
4189 MVT::Glue, Ops);
4190 Chain = SDValue(ResNode, 0);
4191 if (N->getNumValues() == 2) {
4192 InGlue = SDValue(ResNode, 1);
4193 ReplaceUses(SDValue(N, 1), InGlue);
4194 }
4195 ReplaceUses(SDValue(N, 0),
4196 SDValue(Chain.getNode(), Chain.getResNo()));
4197 CurDAG->RemoveDeadNode(N);
4198 return;
4199 }
4200
4201 case ARMISD::CMPZ: {
4202 // select (CMPZ X, #-C) -> (CMPZ (ADDS X, #C), #0)
4203 // This allows us to avoid materializing the expensive negative constant.
4204 // The CMPZ #0 is useless and will be peepholed away but we need to keep it
4205 // for its glue output.
4206 SDValue X = N->getOperand(0);
4207 auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1).getNode());
4208 if (C && C->getSExtValue() < 0 && Subtarget->isThumb()) {
4209 int64_t Addend = -C->getSExtValue();
4210
4211 SDNode *Add = nullptr;
4212 // ADDS can be better than CMN if the immediate fits in a
4213 // 16-bit ADDS, which means either [0,256) for tADDi8 or [0,8) for tADDi3.
4214 // Outside that range we can just use a CMN which is 32-bit but has a
4215 // 12-bit immediate range.
4216 if (Addend < 1<<8) {
4217 if (Subtarget->isThumb2()) {
4218 SDValue Ops[] = { X, CurDAG->getTargetConstant(Addend, dl, MVT::i32),
4219 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
4220 CurDAG->getRegister(0, MVT::i32) };
4221 Add = CurDAG->getMachineNode(ARM::t2ADDri, dl, MVT::i32, Ops);
4222 } else {
4223 unsigned Opc = (Addend < 1<<3) ? ARM::tADDi3 : ARM::tADDi8;
4224 SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32), X,
4225 CurDAG->getTargetConstant(Addend, dl, MVT::i32),
4226 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32)};
4227 Add = CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
4228 }
4229 }
4230 if (Add) {
4231 SDValue Ops2[] = {SDValue(Add, 0), CurDAG->getConstant(0, dl, MVT::i32)};
4232 CurDAG->MorphNodeTo(N, ARMISD::CMPZ, CurDAG->getVTList(MVT::Glue), Ops2);
4233 }
4234 }
4235 // Other cases are autogenerated.
4236 break;
4237 }
4238
4239 case ARMISD::CMOV: {
4240 SDValue InGlue = N->getOperand(4);
4241
4242 if (InGlue.getOpcode() == ARMISD::CMPZ) {
4243 bool SwitchEQNEToPLMI;
4244 SelectCMPZ(InGlue.getNode(), SwitchEQNEToPLMI);
4245
4246 if (SwitchEQNEToPLMI) {
4247 SDValue ARMcc = N->getOperand(2);
4249 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
4250
4251 switch (CC) {
4252 default: llvm_unreachable("CMPZ must be either NE or EQ!");
4253 case ARMCC::NE:
4254 CC = ARMCC::MI;
4255 break;
4256 case ARMCC::EQ:
4257 CC = ARMCC::PL;
4258 break;
4259 }
4260 SDValue NewARMcc = CurDAG->getConstant((unsigned)CC, dl, MVT::i32);
4261 SDValue Ops[] = {N->getOperand(0), N->getOperand(1), NewARMcc,
4262 N->getOperand(3), N->getOperand(4)};
4263 CurDAG->MorphNodeTo(N, ARMISD::CMOV, N->getVTList(), Ops);
4264 }
4265
4266 }
4267 // Other cases are autogenerated.
4268 break;
4269 }
4270 case ARMISD::VZIP: {
4271 EVT VT = N->getValueType(0);
4272 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
4273 unsigned Opc64[] = {ARM::VZIPd8, ARM::VZIPd16, ARM::VTRNd32};
4274 unsigned Opc128[] = {ARM::VZIPq8, ARM::VZIPq16, ARM::VZIPq32};
4275 unsigned Opc = getVectorShuffleOpcode(VT, Opc64, Opc128);
4276 SDValue Pred = getAL(CurDAG, dl);
4277 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
4278 SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Pred, PredReg};
4279 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
4280 return;
4281 }
4282 case ARMISD::VUZP: {
4283 EVT VT = N->getValueType(0);
4284 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
4285 unsigned Opc64[] = {ARM::VUZPd8, ARM::VUZPd16, ARM::VTRNd32};
4286 unsigned Opc128[] = {ARM::VUZPq8, ARM::VUZPq16, ARM::VUZPq32};
4287 unsigned Opc = getVectorShuffleOpcode(VT, Opc64, Opc128);
4288 SDValue Pred = getAL(CurDAG, dl);
4289 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
4290 SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Pred, PredReg};
4291 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
4292 return;
4293 }
4294 case ARMISD::VTRN: {
4295 EVT VT = N->getValueType(0);
4296 unsigned Opc64[] = {ARM::VTRNd8, ARM::VTRNd16, ARM::VTRNd32};
4297 unsigned Opc128[] = {ARM::VTRNq8, ARM::VTRNq16, ARM::VTRNq32};
4298 unsigned Opc = getVectorShuffleOpcode(VT, Opc64, Opc128);
4299 SDValue Pred = getAL(CurDAG, dl);
4300 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
4301 SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Pred, PredReg};
4302 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
4303 return;
4304 }
4305 case ARMISD::BUILD_VECTOR: {
4306 EVT VecVT = N->getValueType(0);
4307 EVT EltVT = VecVT.getVectorElementType();
4308 unsigned NumElts = VecVT.getVectorNumElements();
4309 if (EltVT == MVT::f64) {
4310 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
4311 ReplaceNode(
4312 N, createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)));
4313 return;
4314 }
4315 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
4316 if (NumElts == 2) {
4317 ReplaceNode(
4318 N, createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)));
4319 return;
4320 }
4321 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
4322 ReplaceNode(N,
4323 createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
4324 N->getOperand(2), N->getOperand(3)));
4325 return;
4326 }
4327
4328 case ARMISD::VLD1DUP: {
4329 static const uint16_t DOpcodes[] = { ARM::VLD1DUPd8, ARM::VLD1DUPd16,
4330 ARM::VLD1DUPd32 };
4331 static const uint16_t QOpcodes[] = { ARM::VLD1DUPq8, ARM::VLD1DUPq16,
4332 ARM::VLD1DUPq32 };
4333 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 1, DOpcodes, QOpcodes);
4334 return;
4335 }
4336
4337 case ARMISD::VLD2DUP: {
4338 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
4339 ARM::VLD2DUPd32 };
4340 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 2, Opcodes);
4341 return;
4342 }
4343
4344 case ARMISD::VLD3DUP: {
4345 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
4346 ARM::VLD3DUPd16Pseudo,
4347 ARM::VLD3DUPd32Pseudo };
4348 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 3, Opcodes);
4349 return;
4350 }
4351
4352 case ARMISD::VLD4DUP: {
4353 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
4354 ARM::VLD4DUPd16Pseudo,
4355 ARM::VLD4DUPd32Pseudo };
4356 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 4, Opcodes);
4357 return;
4358 }
4359
4360 case ARMISD::VLD1DUP_UPD: {
4361 static const uint16_t DOpcodes[] = { ARM::VLD1DUPd8wb_fixed,
4362 ARM::VLD1DUPd16wb_fixed,
4363 ARM::VLD1DUPd32wb_fixed };
4364 static const uint16_t QOpcodes[] = { ARM::VLD1DUPq8wb_fixed,
4365 ARM::VLD1DUPq16wb_fixed,
4366 ARM::VLD1DUPq32wb_fixed };
4367 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 1, DOpcodes, QOpcodes);
4368 return;
4369 }
4370
4371 case ARMISD::VLD2DUP_UPD: {
4372 static const uint16_t DOpcodes[] = { ARM::VLD2DUPd8wb_fixed,
4373 ARM::VLD2DUPd16wb_fixed,
4374 ARM::VLD2DUPd32wb_fixed,
4375 ARM::VLD1q64wb_fixed };
4376 static const uint16_t QOpcodes0[] = { ARM::VLD2DUPq8EvenPseudo,
4377 ARM::VLD2DUPq16EvenPseudo,
4378 ARM::VLD2DUPq32EvenPseudo };
4379 static const uint16_t QOpcodes1[] = { ARM::VLD2DUPq8OddPseudoWB_fixed,
4380 ARM::VLD2DUPq16OddPseudoWB_fixed,
4381 ARM::VLD2DUPq32OddPseudoWB_fixed };
4382 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 2, DOpcodes, QOpcodes0, QOpcodes1);
4383 return;
4384 }
4385
4386 case ARMISD::VLD3DUP_UPD: {
4387 static const uint16_t DOpcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
4388 ARM::VLD3DUPd16Pseudo_UPD,
4389 ARM::VLD3DUPd32Pseudo_UPD,
4390 ARM::VLD1d64TPseudoWB_fixed };
4391 static const uint16_t QOpcodes0[] = { ARM::VLD3DUPq8EvenPseudo,
4392 ARM::VLD3DUPq16EvenPseudo,
4393 ARM::VLD3DUPq32EvenPseudo };
4394 static const uint16_t QOpcodes1[] = { ARM::VLD3DUPq8OddPseudo_UPD,
4395 ARM::VLD3DUPq16OddPseudo_UPD,
4396 ARM::VLD3DUPq32OddPseudo_UPD };
4397 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4398 return;
4399 }
4400
4401 case ARMISD::VLD4DUP_UPD: {
4402 static const uint16_t DOpcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
4403 ARM::VLD4DUPd16Pseudo_UPD,
4404 ARM::VLD4DUPd32Pseudo_UPD,
4405 ARM::VLD1d64QPseudoWB_fixed };
4406 static const uint16_t QOpcodes0[] = { ARM::VLD4DUPq8EvenPseudo,
4407 ARM::VLD4DUPq16EvenPseudo,
4408 ARM::VLD4DUPq32EvenPseudo };
4409 static const uint16_t QOpcodes1[] = { ARM::VLD4DUPq8OddPseudo_UPD,
4410 ARM::VLD4DUPq16OddPseudo_UPD,
4411 ARM::VLD4DUPq32OddPseudo_UPD };
4412 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4413 return;
4414 }
4415
4416 case ARMISD::VLD1_UPD: {
4417 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
4418 ARM::VLD1d16wb_fixed,
4419 ARM::VLD1d32wb_fixed,
4420 ARM::VLD1d64wb_fixed };
4421 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
4422 ARM::VLD1q16wb_fixed,
4423 ARM::VLD1q32wb_fixed,
4424 ARM::VLD1q64wb_fixed };
4425 SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr);
4426 return;
4427 }
4428
4429 case ARMISD::VLD2_UPD: {
4430 if (Subtarget->hasNEON()) {
4431 static const uint16_t DOpcodes[] = {
4432 ARM::VLD2d8wb_fixed, ARM::VLD2d16wb_fixed, ARM::VLD2d32wb_fixed,
4433 ARM::VLD1q64wb_fixed};
4434 static const uint16_t QOpcodes[] = {ARM::VLD2q8PseudoWB_fixed,
4435 ARM::VLD2q16PseudoWB_fixed,
4436 ARM::VLD2q32PseudoWB_fixed};
4437 SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
4438 } else {
4439 static const uint16_t Opcodes8[] = {ARM::MVE_VLD20_8,
4440 ARM::MVE_VLD21_8_wb};
4441 static const uint16_t Opcodes16[] = {ARM::MVE_VLD20_16,
4442 ARM::MVE_VLD21_16_wb};
4443 static const uint16_t Opcodes32[] = {ARM::MVE_VLD20_32,
4444 ARM::MVE_VLD21_32_wb};
4445 static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
4446 SelectMVE_VLD(N, 2, Opcodes, true);
4447 }
4448 return;
4449 }
4450
4451 case ARMISD::VLD3_UPD: {
4452 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
4453 ARM::VLD3d16Pseudo_UPD,
4454 ARM::VLD3d32Pseudo_UPD,
4455 ARM::VLD1d64TPseudoWB_fixed};
4456 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
4457 ARM::VLD3q16Pseudo_UPD,
4458 ARM::VLD3q32Pseudo_UPD };
4459 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
4460 ARM::VLD3q16oddPseudo_UPD,
4461 ARM::VLD3q32oddPseudo_UPD };
4462 SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4463 return;
4464 }
4465
4466 case ARMISD::VLD4_UPD: {
4467 if (Subtarget->hasNEON()) {
4468 static const uint16_t DOpcodes[] = {
4469 ARM::VLD4d8Pseudo_UPD, ARM::VLD4d16Pseudo_UPD, ARM::VLD4d32Pseudo_UPD,
4470 ARM::VLD1d64QPseudoWB_fixed};
4471 static const uint16_t QOpcodes0[] = {ARM::VLD4q8Pseudo_UPD,
4472 ARM::VLD4q16Pseudo_UPD,
4473 ARM::VLD4q32Pseudo_UPD};
4474 static const uint16_t QOpcodes1[] = {ARM::VLD4q8oddPseudo_UPD,
4475 ARM::VLD4q16oddPseudo_UPD,
4476 ARM::VLD4q32oddPseudo_UPD};
4477 SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4478 } else {
4479 static const uint16_t Opcodes8[] = {ARM::MVE_VLD40_8, ARM::MVE_VLD41_8,
4480 ARM::MVE_VLD42_8,
4481 ARM::MVE_VLD43_8_wb};
4482 static const uint16_t Opcodes16[] = {ARM::MVE_VLD40_16, ARM::MVE_VLD41_16,
4483 ARM::MVE_VLD42_16,
4484 ARM::MVE_VLD43_16_wb};
4485 static const uint16_t Opcodes32[] = {ARM::MVE_VLD40_32, ARM::MVE_VLD41_32,
4486 ARM::MVE_VLD42_32,
4487 ARM::MVE_VLD43_32_wb};
4488 static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
4489 SelectMVE_VLD(N, 4, Opcodes, true);
4490 }
4491 return;
4492 }
4493
4494 case ARMISD::VLD1x2_UPD: {
4495 if (Subtarget->hasNEON()) {
4496 static const uint16_t DOpcodes[] = {
4497 ARM::VLD1q8wb_fixed, ARM::VLD1q16wb_fixed, ARM::VLD1q32wb_fixed,
4498 ARM::VLD1q64wb_fixed};
4499 static const uint16_t QOpcodes[] = {
4500 ARM::VLD1d8QPseudoWB_fixed, ARM::VLD1d16QPseudoWB_fixed,
4501 ARM::VLD1d32QPseudoWB_fixed, ARM::VLD1d64QPseudoWB_fixed};
4502 SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
4503 return;
4504 }
4505 break;
4506 }
4507
4508 case ARMISD::VLD1x3_UPD: {
4509 if (Subtarget->hasNEON()) {
4510 static const uint16_t DOpcodes[] = {
4511 ARM::VLD1d8TPseudoWB_fixed, ARM::VLD1d16TPseudoWB_fixed,
4512 ARM::VLD1d32TPseudoWB_fixed, ARM::VLD1d64TPseudoWB_fixed};
4513 static const uint16_t QOpcodes0[] = {
4514 ARM::VLD1q8LowTPseudo_UPD, ARM::VLD1q16LowTPseudo_UPD,
4515 ARM::VLD1q32LowTPseudo_UPD, ARM::VLD1q64LowTPseudo_UPD};
4516 static const uint16_t QOpcodes1[] = {
4517 ARM::VLD1q8HighTPseudo_UPD, ARM::VLD1q16HighTPseudo_UPD,
4518 ARM::VLD1q32HighTPseudo_UPD, ARM::VLD1q64HighTPseudo_UPD};
4519 SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4520 return;
4521 }
4522 break;
4523 }
4524
4525 case ARMISD::VLD1x4_UPD: {
4526 if (Subtarget->hasNEON()) {
4527 static const uint16_t DOpcodes[] = {
4528 ARM::VLD1d8QPseudoWB_fixed, ARM::VLD1d16QPseudoWB_fixed,
4529 ARM::VLD1d32QPseudoWB_fixed, ARM::VLD1d64QPseudoWB_fixed};
4530 static const uint16_t QOpcodes0[] = {
4531 ARM::VLD1q8LowQPseudo_UPD, ARM::VLD1q16LowQPseudo_UPD,
4532 ARM::VLD1q32LowQPseudo_UPD, ARM::VLD1q64LowQPseudo_UPD};
4533 static const uint16_t QOpcodes1[] = {
4534 ARM::VLD1q8HighQPseudo_UPD, ARM::VLD1q16HighQPseudo_UPD,
4535 ARM::VLD1q32HighQPseudo_UPD, ARM::VLD1q64HighQPseudo_UPD};
4536 SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4537 return;
4538 }
4539 break;
4540 }
4541
4542 case ARMISD::VLD2LN_UPD: {
4543 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
4544 ARM::VLD2LNd16Pseudo_UPD,
4545 ARM::VLD2LNd32Pseudo_UPD };
4546 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
4547 ARM::VLD2LNq32Pseudo_UPD };
4548 SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
4549 return;
4550 }
4551
4552 case ARMISD::VLD3LN_UPD: {
4553 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
4554 ARM::VLD3LNd16Pseudo_UPD,
4555 ARM::VLD3LNd32Pseudo_UPD };
4556 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
4557 ARM::VLD3LNq32Pseudo_UPD };
4558 SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
4559 return;
4560 }
4561
4562 case ARMISD::VLD4LN_UPD: {
4563 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
4564 ARM::VLD4LNd16Pseudo_UPD,
4565 ARM::VLD4LNd32Pseudo_UPD };
4566 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
4567 ARM::VLD4LNq32Pseudo_UPD };
4568 SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
4569 return;
4570 }
4571
4572 case ARMISD::VST1_UPD: {
4573 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
4574 ARM::VST1d16wb_fixed,
4575 ARM::VST1d32wb_fixed,
4576 ARM::VST1d64wb_fixed };
4577 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
4578 ARM::VST1q16wb_fixed,
4579 ARM::VST1q32wb_fixed,
4580 ARM::VST1q64wb_fixed };
4581 SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr);
4582 return;
4583 }
4584
4585 case ARMISD::VST2_UPD: {
4586 if (Subtarget->hasNEON()) {
4587 static const uint16_t DOpcodes[] = {
4588 ARM::VST2d8wb_fixed, ARM::VST2d16wb_fixed, ARM::VST2d32wb_fixed,
4589 ARM::VST1q64wb_fixed};
4590 static const uint16_t QOpcodes[] = {ARM::VST2q8PseudoWB_fixed,
4591 ARM::VST2q16PseudoWB_fixed,
4592 ARM::VST2q32PseudoWB_fixed};
4593 SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
4594 return;
4595 }
4596 break;
4597 }
4598
4599 case ARMISD::VST3_UPD: {
4600 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
4601 ARM::VST3d16Pseudo_UPD,
4602 ARM::VST3d32Pseudo_UPD,
4603 ARM::VST1d64TPseudoWB_fixed};
4604 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
4605 ARM::VST3q16Pseudo_UPD,
4606 ARM::VST3q32Pseudo_UPD };
4607 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
4608 ARM::VST3q16oddPseudo_UPD,
4609 ARM::VST3q32oddPseudo_UPD };
4610 SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4611 return;
4612 }
4613
4614 case ARMISD::VST4_UPD: {
4615 if (Subtarget->hasNEON()) {
4616 static const uint16_t DOpcodes[] = {
4617 ARM::VST4d8Pseudo_UPD, ARM::VST4d16Pseudo_UPD, ARM::VST4d32Pseudo_UPD,
4618 ARM::VST1d64QPseudoWB_fixed};
4619 static const uint16_t QOpcodes0[] = {ARM::VST4q8Pseudo_UPD,
4620 ARM::VST4q16Pseudo_UPD,
4621 ARM::VST4q32Pseudo_UPD};
4622 static const uint16_t QOpcodes1[] = {ARM::VST4q8oddPseudo_UPD,
4623 ARM::VST4q16oddPseudo_UPD,
4624 ARM::VST4q32oddPseudo_UPD};
4625 SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4626 return;
4627 }
4628 break;
4629 }
4630
4631 case ARMISD::VST1x2_UPD: {
4632 if (Subtarget->hasNEON()) {
4633 static const uint16_t DOpcodes[] = { ARM::VST1q8wb_fixed,
4634 ARM::VST1q16wb_fixed,
4635 ARM::VST1q32wb_fixed,
4636 ARM::VST1q64wb_fixed};
4637 static const uint16_t QOpcodes[] = { ARM::VST1d8QPseudoWB_fixed,
4638 ARM::VST1d16QPseudoWB_fixed,
4639 ARM::VST1d32QPseudoWB_fixed,
4640 ARM::VST1d64QPseudoWB_fixed };
4641 SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
4642 return;
4643 }
4644 break;
4645 }
4646
4647 case ARMISD::VST1x3_UPD: {
4648 if (Subtarget->hasNEON()) {
4649 static const uint16_t DOpcodes[] = { ARM::VST1d8TPseudoWB_fixed,
4650 ARM::VST1d16TPseudoWB_fixed,
4651 ARM::VST1d32TPseudoWB_fixed,
4652 ARM::VST1d64TPseudoWB_fixed };
4653 static const uint16_t QOpcodes0[] = { ARM::VST1q8LowTPseudo_UPD,
4654 ARM::VST1q16LowTPseudo_UPD,
4655 ARM::VST1q32LowTPseudo_UPD,
4656 ARM::VST1q64LowTPseudo_UPD };
4657 static const uint16_t QOpcodes1[] = { ARM::VST1q8HighTPseudo_UPD,
4658 ARM::VST1q16HighTPseudo_UPD,
4659 ARM::VST1q32HighTPseudo_UPD,
4660 ARM::VST1q64HighTPseudo_UPD };
4661 SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4662 return;
4663 }
4664 break;
4665 }
4666
4667 case ARMISD::VST1x4_UPD: {
4668 if (Subtarget->hasNEON()) {
4669 static const uint16_t DOpcodes[] = { ARM::VST1d8QPseudoWB_fixed,
4670 ARM::VST1d16QPseudoWB_fixed,
4671 ARM::VST1d32QPseudoWB_fixed,
4672 ARM::VST1d64QPseudoWB_fixed };
4673 static const uint16_t QOpcodes0[] = { ARM::VST1q8LowQPseudo_UPD,
4674 ARM::VST1q16LowQPseudo_UPD,
4675 ARM::VST1q32LowQPseudo_UPD,
4676 ARM::VST1q64LowQPseudo_UPD };
4677 static const uint16_t QOpcodes1[] = { ARM::VST1q8HighQPseudo_UPD,
4678 ARM::VST1q16HighQPseudo_UPD,
4679 ARM::VST1q32HighQPseudo_UPD,
4680 ARM::VST1q64HighQPseudo_UPD };
4681 SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4682 return;
4683 }
4684 break;
4685 }
4686 case ARMISD::VST2LN_UPD: {
4687 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
4688 ARM::VST2LNd16Pseudo_UPD,
4689 ARM::VST2LNd32Pseudo_UPD };
4690 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
4691 ARM::VST2LNq32Pseudo_UPD };
4692 SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
4693 return;
4694 }
4695
4696 case ARMISD::VST3LN_UPD: {
4697 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
4698 ARM::VST3LNd16Pseudo_UPD,
4699 ARM::VST3LNd32Pseudo_UPD };
4700 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
4701 ARM::VST3LNq32Pseudo_UPD };
4702 SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
4703 return;
4704 }
4705
4706 case ARMISD::VST4LN_UPD: {
4707 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
4708 ARM::VST4LNd16Pseudo_UPD,
4709 ARM::VST4LNd32Pseudo_UPD };
4710 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
4711 ARM::VST4LNq32Pseudo_UPD };
4712 SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
4713 return;
4714 }
4715
4718 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
4719 switch (IntNo) {
4720 default:
4721 break;
4722
4723 case Intrinsic::arm_mrrc:
4724 case Intrinsic::arm_mrrc2: {
4725 SDLoc dl(N);
4726 SDValue Chain = N->getOperand(0);
4727 unsigned Opc;
4728
4729 if (Subtarget->isThumb())
4730 Opc = (IntNo == Intrinsic::arm_mrrc ? ARM::t2MRRC : ARM::t2MRRC2);
4731 else
4732 Opc = (IntNo == Intrinsic::arm_mrrc ? ARM::MRRC : ARM::MRRC2);
4733
4735 Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(), dl)); /* coproc */
4736 Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(3))->getZExtValue(), dl)); /* opc */
4737 Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(4))->getZExtValue(), dl)); /* CRm */
4738
4739 // The mrrc2 instruction in ARM doesn't allow predicates, the top 4 bits of the encoded
4740 // instruction will always be '1111' but it is possible in assembly language to specify
4741 // AL as a predicate to mrrc2 but it doesn't make any difference to the encoded instruction.
4742 if (Opc != ARM::MRRC2) {
4743 Ops.push_back(getAL(CurDAG, dl));
4744 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
4745 }
4746
4747 Ops.push_back(Chain);
4748
4749 // Writes to two registers.
4750 const EVT RetType[] = {MVT::i32, MVT::i32, MVT::Other};
4751
4752 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, RetType, Ops));
4753 return;
4754 }
4755 case Intrinsic::arm_ldaexd:
4756 case Intrinsic::arm_ldrexd: {
4757 SDLoc dl(N);
4758 SDValue Chain = N->getOperand(0);
4759 SDValue MemAddr = N->getOperand(2);
4760 bool isThumb = Subtarget->isThumb() && Subtarget->hasV8MBaselineOps();
4761
4762 bool IsAcquire = IntNo == Intrinsic::arm_ldaexd;
4763 unsigned NewOpc = isThumb ? (IsAcquire ? ARM::t2LDAEXD : ARM::t2LDREXD)
4764 : (IsAcquire ? ARM::LDAEXD : ARM::LDREXD);
4765
4766 // arm_ldrexd returns a i64 value in {i32, i32}
4767 std::vector<EVT> ResTys;
4768 if (isThumb) {
4769 ResTys.push_back(MVT::i32);
4770 ResTys.push_back(MVT::i32);
4771 } else
4772 ResTys.push_back(MVT::Untyped);
4773 ResTys.push_back(MVT::Other);
4774
4775 // Place arguments in the right order.
4776 SDValue Ops[] = {MemAddr, getAL(CurDAG, dl),
4777 CurDAG->getRegister(0, MVT::i32), Chain};
4778 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
4779 // Transfer memoperands.
4780 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
4781 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
4782
4783 // Remap uses.
4784 SDValue OutChain = isThumb ? SDValue(Ld, 2) : SDValue(Ld, 1);
4785 if (!SDValue(N, 0).use_empty()) {
4787 if (isThumb)
4788 Result = SDValue(Ld, 0);
4789 else {
4790