AArch64InstructionSelector.cpp
1//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// AArch64.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
15#include "AArch64InstrInfo.h"
18#include "AArch64RegisterInfo.h"
19#include "AArch64Subtarget.h"
41#include "llvm/IR/Constants.h"
44#include "llvm/IR/IntrinsicsAArch64.h"
46#include "llvm/IR/Type.h"
47#include "llvm/Pass.h"
48#include "llvm/Support/Debug.h"
50#include <optional>
51
52#define DEBUG_TYPE "aarch64-isel"
53
54using namespace llvm;
55using namespace MIPatternMatch;
56using namespace AArch64GISelUtils;
57
58namespace llvm {
61}
62
63namespace {
64
65#define GET_GLOBALISEL_PREDICATE_BITSET
66#include "AArch64GenGlobalISel.inc"
67#undef GET_GLOBALISEL_PREDICATE_BITSET
68
69
70class AArch64InstructionSelector : public InstructionSelector {
71public:
72 AArch64InstructionSelector(const AArch64TargetMachine &TM,
73 const AArch64Subtarget &STI,
74 const AArch64RegisterBankInfo &RBI);
75
76 bool select(MachineInstr &I) override;
77 static const char *getName() { return DEBUG_TYPE; }
78
79 void setupMF(MachineFunction &MF, GISelKnownBits *KB,
80 CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
81 BlockFrequencyInfo *BFI) override {
82 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
83 MIB.setMF(MF);
84
85 // hasFnAttribute() is expensive to call on every BRCOND selection, so
86 // cache it here for each run of the selector.
87 ProduceNonFlagSettingCondBr =
88 !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
89 MFReturnAddr = Register();
90
91 processPHIs(MF);
92 }
93
94private:
95 /// tblgen-erated 'select' implementation, used as the initial selector for
96 /// the patterns that don't require complex C++.
97 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
98
99 // A lowering phase that runs before any selection attempts.
100 // Returns true if the instruction was modified.
101 bool preISelLower(MachineInstr &I);
102
103 // An early selection function that runs before the selectImpl() call.
104 bool earlySelect(MachineInstr &I);
105
106 /// Save state that is shared between select calls, call select on \p I and
107 /// then restore the saved state. This can be used to recursively call select
108 /// within a select call.
109 bool selectAndRestoreState(MachineInstr &I);
110
111 // Do some preprocessing of G_PHIs before we begin selection.
112 void processPHIs(MachineFunction &MF);
113
114 bool earlySelectSHL(MachineInstr &I, MachineRegisterInfo &MRI);
115
116 /// Eliminate same-sized cross-bank copies into stores before selectImpl().
117 bool contractCrossBankCopyIntoStore(MachineInstr &I,
118                                     MachineRegisterInfo &MRI);
119
120 bool convertPtrAddToAdd(MachineInstr &I, MachineRegisterInfo &MRI);
121
122 bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
123 MachineRegisterInfo &MRI) const;
124 bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
125 MachineRegisterInfo &MRI) const;
126
127 ///@{
128 /// Helper functions for selectCompareBranch.
129 bool selectCompareBranchFedByFCmp(MachineInstr &I, MachineInstr &FCmp,
130 MachineIRBuilder &MIB) const;
131 bool selectCompareBranchFedByICmp(MachineInstr &I, MachineInstr &ICmp,
132 MachineIRBuilder &MIB) const;
133 bool tryOptCompareBranchFedByICmp(MachineInstr &I, MachineInstr &ICmp,
134 MachineIRBuilder &MIB) const;
135 bool tryOptAndIntoCompareBranch(MachineInstr &AndInst, bool Invert,
136 MachineBasicBlock *DstMBB,
137 MachineIRBuilder &MIB) const;
138 ///@}
139
140 bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
141                          MachineRegisterInfo &MRI);
142
143 bool selectVectorAshrLshr(MachineInstr &I, MachineRegisterInfo &MRI);
144 bool selectVectorSHL(MachineInstr &I, MachineRegisterInfo &MRI);
145
146 // Helper to generate an equivalent of scalar_to_vector into a new register,
147 // returned via 'Dst'.
148 MachineInstr *emitScalarToVector(unsigned EltSize,
149 const TargetRegisterClass *DstRC,
150 Register Scalar,
151 MachineIRBuilder &MIRBuilder) const;
152 /// Helper to narrow a vector that was widened by emitScalarToVector.
153 /// Copies the lowest part of a 128-bit or 64-bit vector to a 64-bit or
154 /// 32-bit vector, respectively.
155 MachineInstr *emitNarrowVector(Register DstReg, Register SrcReg,
156 MachineIRBuilder &MIRBuilder,
157 MachineRegisterInfo &MRI) const;
158
159 /// Emit a lane insert into \p DstReg, or a new vector register if
160 /// std::nullopt is provided.
161 ///
162 /// The lane inserted into is defined by \p LaneIdx. The vector source
163 /// register is given by \p SrcReg. The register containing the element is
164 /// given by \p EltReg.
165 MachineInstr *emitLaneInsert(std::optional<Register> DstReg, Register SrcReg,
166 Register EltReg, unsigned LaneIdx,
167 const RegisterBank &RB,
168 MachineIRBuilder &MIRBuilder) const;
169
170 /// Emit a sequence of instructions representing a constant \p CV for a
171 /// vector register \p Dst. (E.g. a MOV, or a load from a constant pool.)
172 ///
173 /// \returns the last instruction in the sequence on success, and nullptr
174 /// otherwise.
175 MachineInstr *emitConstantVector(Register Dst, Constant *CV,
176                                  MachineIRBuilder &MIRBuilder,
177                                  MachineRegisterInfo &MRI);
178
179 MachineInstr *tryAdvSIMDModImm8(Register Dst, unsigned DstSize, APInt Bits,
180 MachineIRBuilder &MIRBuilder);
181
182 MachineInstr *tryAdvSIMDModImm16(Register Dst, unsigned DstSize, APInt Bits,
183 MachineIRBuilder &MIRBuilder, bool Inv);
184
185 MachineInstr *tryAdvSIMDModImm32(Register Dst, unsigned DstSize, APInt Bits,
186 MachineIRBuilder &MIRBuilder, bool Inv);
187 MachineInstr *tryAdvSIMDModImm64(Register Dst, unsigned DstSize, APInt Bits,
188 MachineIRBuilder &MIRBuilder);
189 MachineInstr *tryAdvSIMDModImm321s(Register Dst, unsigned DstSize, APInt Bits,
190 MachineIRBuilder &MIRBuilder, bool Inv);
191 MachineInstr *tryAdvSIMDModImmFP(Register Dst, unsigned DstSize, APInt Bits,
192 MachineIRBuilder &MIRBuilder);
193
194 bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI);
195 bool tryOptConstantBuildVec(MachineInstr &MI, LLT DstTy,
196                             MachineRegisterInfo &MRI);
197 /// \returns true if a G_BUILD_VECTOR instruction \p MI can be selected as a
198 /// SUBREG_TO_REG.
199 bool tryOptBuildVecToSubregToReg(MachineInstr &MI, MachineRegisterInfo &MRI);
200 bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI);
203
204 bool selectShuffleVector(MachineInstr &I, MachineRegisterInfo &MRI);
205 bool selectExtractElt(MachineInstr &I, MachineRegisterInfo &MRI);
206 bool selectConcatVectors(MachineInstr &I, MachineRegisterInfo &MRI);
207 bool selectSplitVectorUnmerge(MachineInstr &I, MachineRegisterInfo &MRI);
208
209 /// Helper function to select vector load intrinsics like
210 /// @llvm.aarch64.neon.ld2.*, @llvm.aarch64.neon.ld4.*, etc.
211 /// \p Opc is the opcode that the selected instruction should use.
212 /// \p NumVecs is the number of vector destinations for the instruction.
213 /// \p I is the original G_INTRINSIC_W_SIDE_EFFECTS instruction.
214 bool selectVectorLoadIntrinsic(unsigned Opc, unsigned NumVecs,
215 MachineInstr &I);
216 bool selectVectorLoadLaneIntrinsic(unsigned Opc, unsigned NumVecs,
217 MachineInstr &I);
218 void selectVectorStoreIntrinsic(MachineInstr &I, unsigned NumVecs,
219 unsigned Opc);
220 bool selectVectorStoreLaneIntrinsic(MachineInstr &I, unsigned NumVecs,
221 unsigned Opc);
222 bool selectIntrinsicWithSideEffects(MachineInstr &I,
223                                     MachineRegisterInfo &MRI);
224 bool selectIntrinsic(MachineInstr &I, MachineRegisterInfo &MRI);
225 bool selectVectorICmp(MachineInstr &I, MachineRegisterInfo &MRI);
226 bool selectJumpTable(MachineInstr &I, MachineRegisterInfo &MRI);
227 bool selectBrJT(MachineInstr &I, MachineRegisterInfo &MRI);
228 bool selectTLSGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI);
229 bool selectReduction(MachineInstr &I, MachineRegisterInfo &MRI);
230 bool selectMOPS(MachineInstr &I, MachineRegisterInfo &MRI);
231 bool selectUSMovFromExtend(MachineInstr &I, MachineRegisterInfo &MRI);
232
233 bool selectIndexedExtLoad(MachineInstr &I, MachineRegisterInfo &MRI);
234 bool selectIndexedLoad(MachineInstr &I, MachineRegisterInfo &MRI);
235 bool selectIndexedStore(GIndexedStore &I, MachineRegisterInfo &MRI);
236
237 unsigned emitConstantPoolEntry(const Constant *CPVal,
238 MachineFunction &MF) const;
239 MachineInstr *emitLoadFromConstantPool(const Constant *CPVal,
240                                        MachineIRBuilder &MIRBuilder) const;
241
242 // Emit a vector concat operation.
243 MachineInstr *emitVectorConcat(std::optional<Register> Dst, Register Op1,
244 Register Op2,
245 MachineIRBuilder &MIRBuilder) const;
246
247 // Emit an integer compare between LHS and RHS, which checks for Predicate.
248 MachineInstr *emitIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
249 MachineOperand &Predicate,
250 MachineIRBuilder &MIRBuilder) const;
251
252 /// Emit a floating point comparison between \p LHS and \p RHS.
253 /// \p Pred, if provided, is the predicate to use.
254 MachineInstr *
255 emitFPCompare(Register LHS, Register RHS, MachineIRBuilder &MIRBuilder,
256 std::optional<CmpInst::Predicate> = std::nullopt) const;
257
258 MachineInstr *
259 emitInstr(unsigned Opcode, std::initializer_list<llvm::DstOp> DstOps,
260 std::initializer_list<llvm::SrcOp> SrcOps,
261 MachineIRBuilder &MIRBuilder,
262 const ComplexRendererFns &RenderFns = std::nullopt) const;
263 /// Helper function to emit an add or sub instruction.
264 ///
265 /// \p AddrModeAndSizeToOpcode must contain each of the opcode variants above
266 /// in a specific order.
267 ///
268 /// Below is an example of the expected input to \p AddrModeAndSizeToOpcode.
269 ///
270 /// \code
271 /// const std::array<std::array<unsigned, 2>, 5> Table {
272 /// {{AArch64::ADDXri, AArch64::ADDWri},
273 /// {AArch64::ADDXrs, AArch64::ADDWrs},
274 /// {AArch64::ADDXrr, AArch64::ADDWrr},
275 /// {AArch64::SUBXri, AArch64::SUBWri},
276 /// {AArch64::ADDXrx, AArch64::ADDWrx}}};
277 /// \endcode
278 ///
279 /// Each row in the table corresponds to a different addressing mode. Each
280 /// column corresponds to a different register size.
281 ///
282 /// \attention Rows must be structured as follows:
283 /// - Row 0: The ri opcode variants
284 /// - Row 1: The rs opcode variants
285 /// - Row 2: The rr opcode variants
286 /// - Row 3: The ri opcode variants for negative immediates
287 /// - Row 4: The rx opcode variants
288 ///
289 /// \attention Columns must be structured as follows:
290 /// - Column 0: The 64-bit opcode variants
291 /// - Column 1: The 32-bit opcode variants
292 ///
293 /// \p Dst is the destination register of the binop to emit.
294 /// \p LHS is the left-hand operand of the binop to emit.
295 /// \p RHS is the right-hand operand of the binop to emit.
296 MachineInstr *emitAddSub(
297 const std::array<std::array<unsigned, 2>, 5> &AddrModeAndSizeToOpcode,
298     Register Dst, MachineOperand &LHS, MachineOperand &RHS,
299     MachineIRBuilder &MIRBuilder) const;
300 MachineInstr *emitADD(Register DefReg, MachineOperand &LHS,
301                       MachineOperand &RHS,
302                       MachineIRBuilder &MIRBuilder) const;
304 MachineIRBuilder &MIRBuilder) const;
306 MachineIRBuilder &MIRBuilder) const;
308 MachineIRBuilder &MIRBuilder) const;
310 MachineIRBuilder &MIRBuilder) const;
312 MachineIRBuilder &MIRBuilder) const;
314 MachineIRBuilder &MIRBuilder) const;
315 MachineInstr *emitSelect(Register Dst, Register LHS, Register RHS,
316                          AArch64CC::CondCode CC,
317                          MachineIRBuilder &MIRBuilder) const;
318 MachineInstr *emitExtractVectorElt(std::optional<Register> DstReg,
319 const RegisterBank &DstRB, LLT ScalarTy,
320 Register VecReg, unsigned LaneIdx,
321 MachineIRBuilder &MIRBuilder) const;
322 MachineInstr *emitCSINC(Register Dst, Register Src1, Register Src2,
323                           AArch64CC::CondCode Pred,
324                           MachineIRBuilder &MIRBuilder) const;
325 /// Emit a CSet for a FP compare.
326 ///
327 /// \p Dst is expected to be a 32-bit scalar register.
328 MachineInstr *emitCSetForFCmp(Register Dst, CmpInst::Predicate Pred,
329 MachineIRBuilder &MIRBuilder) const;
330
331 /// Emit an instruction that sets NZCV to the carry-in expected by \p I.
332 /// Might elide the instruction if the previous instruction already sets NZCV
333 /// correctly.
334 MachineInstr *emitCarryIn(MachineInstr &I, Register CarryReg);
335
336 /// Emit the overflow op for \p Opcode.
337 ///
338 /// \p Opcode is expected to be an overflow op's opcode, e.g. G_UADDO,
339 /// G_USUBO, etc.
340 std::pair<MachineInstr *, AArch64CC::CondCode>
341 emitOverflowOp(unsigned Opcode, Register Dst, MachineOperand &LHS,
342 MachineOperand &RHS, MachineIRBuilder &MIRBuilder) const;
343
344 bool selectOverflowOp(MachineInstr &I, MachineRegisterInfo &MRI);
345
346 /// Emit expression as a conjunction (a series of CCMP/CFCMP ops).
347 /// In some cases this is even possible with OR operations in the expression.
348 MachineInstr *emitConjunction(Register Val, AArch64CC::CondCode &OutCC,
349                               MachineIRBuilder &MIB) const;
352 AArch64CC::CondCode Predicate,
354 MachineIRBuilder &MIB) const;
356 bool Negate, Register CCOp,
357 AArch64CC::CondCode Predicate,
358 MachineIRBuilder &MIB) const;
359
360 /// Emit a TB(N)Z instruction which tests \p Bit in \p TestReg.
361 /// \p IsNegative is true if the test should be "not zero".
362 /// This will also optimize the test bit instruction when possible.
363 MachineInstr *emitTestBit(Register TestReg, uint64_t Bit, bool IsNegative,
364 MachineBasicBlock *DstMBB,
365 MachineIRBuilder &MIB) const;
366
367 /// Emit a CB(N)Z instruction which branches to \p DestMBB.
368 MachineInstr *emitCBZ(Register CompareReg, bool IsNegative,
369 MachineBasicBlock *DestMBB,
370 MachineIRBuilder &MIB) const;
371
372 // Equivalent to the i32shift_a and friends from AArch64InstrInfo.td.
373 // We use these manually instead of using the importer since it doesn't
374 // support SDNodeXForm.
375 ComplexRendererFns selectShiftA_32(const MachineOperand &Root) const;
376 ComplexRendererFns selectShiftB_32(const MachineOperand &Root) const;
377 ComplexRendererFns selectShiftA_64(const MachineOperand &Root) const;
378 ComplexRendererFns selectShiftB_64(const MachineOperand &Root) const;
379
380 ComplexRendererFns select12BitValueWithLeftShift(uint64_t Immed) const;
381 ComplexRendererFns selectArithImmed(MachineOperand &Root) const;
382 ComplexRendererFns selectNegArithImmed(MachineOperand &Root) const;
383
384 ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
385 unsigned Size) const;
386
387 ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
388 return selectAddrModeUnscaled(Root, 1);
389 }
390 ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
391 return selectAddrModeUnscaled(Root, 2);
392 }
393 ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
394 return selectAddrModeUnscaled(Root, 4);
395 }
396 ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
397 return selectAddrModeUnscaled(Root, 8);
398 }
399 ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
400 return selectAddrModeUnscaled(Root, 16);
401 }
402
403 /// Helper to try to fold in a GISEL_ADD_LOW into an immediate, to be used
404 /// from complex pattern matchers like selectAddrModeIndexed().
405 ComplexRendererFns tryFoldAddLowIntoImm(MachineInstr &RootDef, unsigned Size,
406 MachineRegisterInfo &MRI) const;
407
408 ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
409 unsigned Size) const;
410 template <int Width>
411 ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
412 return selectAddrModeIndexed(Root, Width / 8);
413 }
414
415 bool isWorthFoldingIntoExtendedReg(MachineInstr &MI,
416 const MachineRegisterInfo &MRI) const;
417 ComplexRendererFns
418 selectAddrModeShiftedExtendXReg(MachineOperand &Root,
419 unsigned SizeInBytes) const;
420
421 /// Returns a \p ComplexRendererFns which contains a base, an offset, and
422 /// whether a shift + extend should be folded into an addressing mode. Returns
423 /// std::nullopt when this is not profitable or possible.
424 ComplexRendererFns
425 selectExtendedSHL(MachineOperand &Root, MachineOperand &Base,
426 MachineOperand &Offset, unsigned SizeInBytes,
427 bool WantsExt) const;
428 ComplexRendererFns selectAddrModeRegisterOffset(MachineOperand &Root) const;
429 ComplexRendererFns selectAddrModeXRO(MachineOperand &Root,
430 unsigned SizeInBytes) const;
431 template <int Width>
432 ComplexRendererFns selectAddrModeXRO(MachineOperand &Root) const {
433 return selectAddrModeXRO(Root, Width / 8);
434 }
435
436 ComplexRendererFns selectAddrModeWRO(MachineOperand &Root,
437 unsigned SizeInBytes) const;
438 template <int Width>
439 ComplexRendererFns selectAddrModeWRO(MachineOperand &Root) const {
440 return selectAddrModeWRO(Root, Width / 8);
441 }
442
443 ComplexRendererFns selectShiftedRegister(MachineOperand &Root,
444 bool AllowROR = false) const;
445
446 ComplexRendererFns selectArithShiftedRegister(MachineOperand &Root) const {
447 return selectShiftedRegister(Root);
448 }
449
450 ComplexRendererFns selectLogicalShiftedRegister(MachineOperand &Root) const {
451 return selectShiftedRegister(Root, true);
452 }
453
454 /// Given an extend instruction, determine the correct shift-extend type for
455 /// that instruction.
456 ///
457 /// If the instruction is going to be used in a load or store, pass
458 /// \p IsLoadStore = true.
459 AArch64_AM::ShiftExtendType
460 getExtendTypeForInst(MachineInstr &MI, MachineRegisterInfo &MRI,
461 bool IsLoadStore = false) const;
462
463 /// Move \p Reg to \p RC if \p Reg is not already on \p RC.
464 ///
465 /// \returns Either \p Reg if no change was necessary, or the new register
466 /// created by moving \p Reg.
467 ///
468 /// Note: This uses emitCopy right now.
469 Register moveScalarRegClass(Register Reg, const TargetRegisterClass &RC,
470 MachineIRBuilder &MIB) const;
471
472 ComplexRendererFns selectArithExtendedRegister(MachineOperand &Root) const;
473
474 ComplexRendererFns selectExtractHigh(MachineOperand &Root) const;
475
476 void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
477 int OpIdx = -1) const;
478 void renderLogicalImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
479 int OpIdx = -1) const;
480 void renderLogicalImm64(MachineInstrBuilder &MIB, const MachineInstr &I,
481 int OpIdx = -1) const;
482 void renderFPImm16(MachineInstrBuilder &MIB, const MachineInstr &MI,
483 int OpIdx = -1) const;
484 void renderFPImm32(MachineInstrBuilder &MIB, const MachineInstr &MI,
485 int OpIdx = -1) const;
486 void renderFPImm64(MachineInstrBuilder &MIB, const MachineInstr &MI,
487 int OpIdx = -1) const;
488 void renderFPImm32SIMDModImmType4(MachineInstrBuilder &MIB,
489 const MachineInstr &MI,
490 int OpIdx = -1) const;
491
492 // Materialize a GlobalValue or BlockAddress using a movz+movk sequence.
493 void materializeLargeCMVal(MachineInstr &I, const Value *V, unsigned OpFlags);
494
495 // Optimization methods.
496 bool tryOptSelect(GSelect &Sel);
497 bool tryOptSelectConjunction(GSelect &Sel, MachineInstr &CondMI);
498 MachineInstr *tryFoldIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
499 MachineOperand &Predicate,
500 MachineIRBuilder &MIRBuilder) const;
501
502 /// Return true if \p MI is a load or store of \p NumBytes bytes.
503 bool isLoadStoreOfNumBytes(const MachineInstr &MI, unsigned NumBytes) const;
504
505 /// Returns true if \p MI is guaranteed to have the high-half of a 64-bit
506 /// register zeroed out. In other words, the result of MI has been explicitly
507 /// zero extended.
508 bool isDef32(const MachineInstr &MI) const;
509
510 const AArch64TargetMachine &TM;
511 const AArch64Subtarget &STI;
512 const AArch64InstrInfo &TII;
513 const AArch64RegisterInfo &TRI;
514 const AArch64RegisterBankInfo &RBI;
515
516 bool ProduceNonFlagSettingCondBr = false;
517
518 // Some cached values used during selection.
519 // We use LR as a live-in register, and we keep track of it here as it can be
520 // clobbered by calls.
521 Register MFReturnAddr;
522
523 MachineIRBuilder MIB;
524
525#define GET_GLOBALISEL_PREDICATES_DECL
526#include "AArch64GenGlobalISel.inc"
527#undef GET_GLOBALISEL_PREDICATES_DECL
528
529// We declare the temporaries used by selectImpl() in the class to minimize the
530// cost of constructing placeholder values.
531#define GET_GLOBALISEL_TEMPORARIES_DECL
532#include "AArch64GenGlobalISel.inc"
533#undef GET_GLOBALISEL_TEMPORARIES_DECL
534};
535
536} // end anonymous namespace
537
538#define GET_GLOBALISEL_IMPL
539#include "AArch64GenGlobalISel.inc"
540#undef GET_GLOBALISEL_IMPL
541
542AArch64InstructionSelector::AArch64InstructionSelector(
543 const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
544 const AArch64RegisterBankInfo &RBI)
545 : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
546 RBI(RBI),
547#define GET_GLOBALISEL_PREDICATES_INIT
548#include "AArch64GenGlobalISel.inc"
549#undef GET_GLOBALISEL_PREDICATES_INIT
550#define GET_GLOBALISEL_TEMPORARIES_INIT
551#include "AArch64GenGlobalISel.inc"
552#undef GET_GLOBALISEL_TEMPORARIES_INIT
553{
554}
555
556// FIXME: This should be target-independent, inferred from the types declared
557// for each class in the bank.
558//
559/// Given a register bank, and a type, return the smallest register class that
560/// can represent that combination.
561static const TargetRegisterClass *
562getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
563 bool GetAllRegSet = false) {
564 if (RB.getID() == AArch64::GPRRegBankID) {
565 if (Ty.getSizeInBits() <= 32)
566 return GetAllRegSet ? &AArch64::GPR32allRegClass
567 : &AArch64::GPR32RegClass;
568 if (Ty.getSizeInBits() == 64)
569 return GetAllRegSet ? &AArch64::GPR64allRegClass
570 : &AArch64::GPR64RegClass;
571 if (Ty.getSizeInBits() == 128)
572 return &AArch64::XSeqPairsClassRegClass;
573 return nullptr;
574 }
575
576 if (RB.getID() == AArch64::FPRRegBankID) {
577 switch (Ty.getSizeInBits()) {
578 case 8:
579 return &AArch64::FPR8RegClass;
580 case 16:
581 return &AArch64::FPR16RegClass;
582 case 32:
583 return &AArch64::FPR32RegClass;
584 case 64:
585 return &AArch64::FPR64RegClass;
586 case 128:
587 return &AArch64::FPR128RegClass;
588 }
589 return nullptr;
590 }
591
592 return nullptr;
593}
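//
// For example (a minimal sketch; `RB` is assumed to be the GPR register bank
// returned by RBI.getRegBank() for some value):
// \code
//   const TargetRegisterClass *RC =
//       getRegClassForTypeOnBank(LLT::scalar(32), RB);   // &AArch64::GPR32RegClass
//   const TargetRegisterClass *Pair =
//       getRegClassForTypeOnBank(LLT::scalar(128), RB);  // &AArch64::XSeqPairsClassRegClass
// \endcode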
594
595/// Given a register bank, and size in bits, return the smallest register class
596/// that can represent that combination.
597static const TargetRegisterClass *
598getMinClassForRegBank(const RegisterBank &RB, unsigned SizeInBits,
599 bool GetAllRegSet = false) {
600 unsigned RegBankID = RB.getID();
601
602 if (RegBankID == AArch64::GPRRegBankID) {
603 if (SizeInBits <= 32)
604 return GetAllRegSet ? &AArch64::GPR32allRegClass
605 : &AArch64::GPR32RegClass;
606 if (SizeInBits == 64)
607 return GetAllRegSet ? &AArch64::GPR64allRegClass
608 : &AArch64::GPR64RegClass;
609 if (SizeInBits == 128)
610 return &AArch64::XSeqPairsClassRegClass;
611 }
612
613 if (RegBankID == AArch64::FPRRegBankID) {
614 switch (SizeInBits) {
615 default:
616 return nullptr;
617 case 8:
618 return &AArch64::FPR8RegClass;
619 case 16:
620 return &AArch64::FPR16RegClass;
621 case 32:
622 return &AArch64::FPR32RegClass;
623 case 64:
624 return &AArch64::FPR64RegClass;
625 case 128:
626 return &AArch64::FPR128RegClass;
627 }
628 }
629
630 return nullptr;
631}
632
633/// Returns the correct subregister to use for a given register class.
634static bool getSubRegForClass(const TargetRegisterClass *RC,
635 const TargetRegisterInfo &TRI, unsigned &SubReg) {
636 switch (TRI.getRegSizeInBits(*RC)) {
637 case 8:
638 SubReg = AArch64::bsub;
639 break;
640 case 16:
641 SubReg = AArch64::hsub;
642 break;
643 case 32:
644 if (RC != &AArch64::FPR32RegClass)
645 SubReg = AArch64::sub_32;
646 else
647 SubReg = AArch64::ssub;
648 break;
649 case 64:
650 SubReg = AArch64::dsub;
651 break;
652 default:
653 LLVM_DEBUG(
654     dbgs() << "Couldn't find appropriate subregister for register class.");
655 return false;
656 }
657
658 return true;
659}
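//
// Usage sketch (assuming `TRI` is the target's TargetRegisterInfo):
// \code
//   unsigned SubReg;
//   getSubRegForClass(&AArch64::FPR64RegClass, TRI, SubReg); // SubReg == AArch64::dsub
//   getSubRegForClass(&AArch64::GPR32RegClass, TRI, SubReg); // SubReg == AArch64::sub_32
// \endcode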
660
661/// Returns the minimum size the given register bank can hold.
662static unsigned getMinSizeForRegBank(const RegisterBank &RB) {
663 switch (RB.getID()) {
664 case AArch64::GPRRegBankID:
665 return 32;
666 case AArch64::FPRRegBankID:
667 return 8;
668 default:
669 llvm_unreachable("Tried to get minimum size for unknown register bank.");
670 }
671}
672
673/// Create a REG_SEQUENCE instruction using the registers in \p Regs.
674/// Helper function for functions like createDTuple and createQTuple.
675///
676/// \p RegClassIDs - The list of register class IDs available for some tuple of
677/// a scalar class. E.g. QQRegClassID, QQQRegClassID, QQQQRegClassID. This is
678/// expected to contain between 2 and 4 tuple classes.
679///
680/// \p SubRegs - The list of subregister classes associated with each register
681/// class ID in \p RegClassIDs. E.g., QQRegClassID should use the qsub0
682/// subregister class. The index of each subregister class is expected to
683/// correspond with the index of each register class.
684///
685/// \returns Either the destination register of REG_SEQUENCE instruction that
686/// was created, or the 0th element of \p Regs if \p Regs contains a single
687/// element.
688static Register createTuple(ArrayRef<Register> Regs,
689 const unsigned RegClassIDs[],
690 const unsigned SubRegs[], MachineIRBuilder &MIB) {
691 unsigned NumRegs = Regs.size();
692 if (NumRegs == 1)
693 return Regs[0];
694 assert(NumRegs >= 2 && NumRegs <= 4 &&
695 "Only support between two and 4 registers in a tuple!");
697 auto *DesiredClass = TRI->getRegClass(RegClassIDs[NumRegs - 2]);
698 auto RegSequence =
699 MIB.buildInstr(TargetOpcode::REG_SEQUENCE, {DesiredClass}, {});
700 for (unsigned I = 0, E = Regs.size(); I < E; ++I) {
701 RegSequence.addUse(Regs[I]);
702 RegSequence.addImm(SubRegs[I]);
703 }
704 return RegSequence.getReg(0);
705}
706
707/// Create a tuple of D-registers using the registers in \p Regs.
708static Register createDTuple(ArrayRef<Register> Regs, MachineIRBuilder &MIB) {
709 static const unsigned RegClassIDs[] = {
710 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
711 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
712 AArch64::dsub2, AArch64::dsub3};
713 return createTuple(Regs, RegClassIDs, SubRegs, MIB);
714}
715
716/// Create a tuple of Q-registers using the registers in \p Regs.
717static Register createQTuple(ArrayRef<Register> Regs, MachineIRBuilder &MIB) {
718 static const unsigned RegClassIDs[] = {
719 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
720 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
721 AArch64::qsub2, AArch64::qsub3};
722 return createTuple(Regs, RegClassIDs, SubRegs, MIB);
723}
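//
// Usage sketch (Reg0 and Reg1 are assumed to already be 64-bit FPR values):
// \code
//   Register Tuple = createDTuple({Reg0, Reg1}, MIB);
//   // Tuple is a single DD-class register built by a REG_SEQUENCE, with Reg0
//   // placed in dsub0 and Reg1 in dsub1.
// \endcode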
724
725static std::optional<uint64_t> getImmedFromMO(const MachineOperand &Root) {
726 auto &MI = *Root.getParent();
727 auto &MBB = *MI.getParent();
728 auto &MF = *MBB.getParent();
729 auto &MRI = MF.getRegInfo();
730 uint64_t Immed;
731 if (Root.isImm())
732 Immed = Root.getImm();
733 else if (Root.isCImm())
734 Immed = Root.getCImm()->getZExtValue();
735 else if (Root.isReg()) {
736 auto ValAndVReg =
738 if (!ValAndVReg)
739 return std::nullopt;
740 Immed = ValAndVReg->Value.getSExtValue();
741 } else
742 return std::nullopt;
743 return Immed;
744}
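//
// Usage sketch: the root operand may be an Imm, a CImm, or a vreg defined by a
// constant; all three forms yield the plain integer value:
// \code
//   std::optional<uint64_t> Immed = getImmedFromMO(Root);
//   bool FitsUnshifted12Bit = Immed && *Immed < 4096; // e.g. an ADDri immediate.
// \endcode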
745
746/// Check whether \p I is a currently unsupported binary operation:
747/// - it has an unsized type
748/// - an operand is not a vreg
749 /// - not all operands are in the same bank
750/// These are checks that should someday live in the verifier, but right now,
751/// these are mostly limitations of the aarch64 selector.
752static bool unsupportedBinOp(const MachineInstr &I,
753 const AArch64RegisterBankInfo &RBI,
754                              const MachineRegisterInfo &MRI,
755                              const AArch64RegisterInfo &TRI) {
756 LLT Ty = MRI.getType(I.getOperand(0).getReg());
757 if (!Ty.isValid()) {
758 LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
759 return true;
760 }
761
762 const RegisterBank *PrevOpBank = nullptr;
763 for (auto &MO : I.operands()) {
764 // FIXME: Support non-register operands.
765 if (!MO.isReg()) {
766 LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
767 return true;
768 }
769
770 // FIXME: Can generic operations have physical registers operands? If
771 // so, this will need to be taught about that, and we'll need to get the
772 // bank out of the minimal class for the register.
773 // Either way, this needs to be documented (and possibly verified).
774 if (!MO.getReg().isVirtual()) {
775 LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
776 return true;
777 }
778
779 const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
780 if (!OpBank) {
781 LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
782 return true;
783 }
784
785 if (PrevOpBank && OpBank != PrevOpBank) {
786 LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
787 return true;
788 }
789 PrevOpBank = OpBank;
790 }
791 return false;
792}
793
794/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
795/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
796/// and of size \p OpSize.
797/// \returns \p GenericOpc if the combination is unsupported.
798static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
799 unsigned OpSize) {
800 switch (RegBankID) {
801 case AArch64::GPRRegBankID:
802 if (OpSize == 32) {
803 switch (GenericOpc) {
804 case TargetOpcode::G_SHL:
805 return AArch64::LSLVWr;
806 case TargetOpcode::G_LSHR:
807 return AArch64::LSRVWr;
808 case TargetOpcode::G_ASHR:
809 return AArch64::ASRVWr;
810 default:
811 return GenericOpc;
812 }
813 } else if (OpSize == 64) {
814 switch (GenericOpc) {
815 case TargetOpcode::G_PTR_ADD:
816 return AArch64::ADDXrr;
817 case TargetOpcode::G_SHL:
818 return AArch64::LSLVXr;
819 case TargetOpcode::G_LSHR:
820 return AArch64::LSRVXr;
821 case TargetOpcode::G_ASHR:
822 return AArch64::ASRVXr;
823 default:
824 return GenericOpc;
825 }
826 }
827 break;
828 case AArch64::FPRRegBankID:
829 switch (OpSize) {
830 case 32:
831 switch (GenericOpc) {
832 case TargetOpcode::G_FADD:
833 return AArch64::FADDSrr;
834 case TargetOpcode::G_FSUB:
835 return AArch64::FSUBSrr;
836 case TargetOpcode::G_FMUL:
837 return AArch64::FMULSrr;
838 case TargetOpcode::G_FDIV:
839 return AArch64::FDIVSrr;
840 default:
841 return GenericOpc;
842 }
843 case 64:
844 switch (GenericOpc) {
845 case TargetOpcode::G_FADD:
846 return AArch64::FADDDrr;
847 case TargetOpcode::G_FSUB:
848 return AArch64::FSUBDrr;
849 case TargetOpcode::G_FMUL:
850 return AArch64::FMULDrr;
851 case TargetOpcode::G_FDIV:
852 return AArch64::FDIVDrr;
853 case TargetOpcode::G_OR:
854 return AArch64::ORRv8i8;
855 default:
856 return GenericOpc;
857 }
858 }
859 break;
860 }
861 return GenericOpc;
862}
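//
// Worked examples of the mapping above (a sketch; unsupported combinations fall
// through and return the generic opcode unchanged):
// \code
//   unsigned A = selectBinaryOp(TargetOpcode::G_SHL, AArch64::GPRRegBankID, 32);
//   // A == AArch64::LSLVWr
//   unsigned B = selectBinaryOp(TargetOpcode::G_FADD, AArch64::FPRRegBankID, 64);
//   // B == AArch64::FADDDrr
// \endcode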
863
864/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
865/// appropriate for the (value) register bank \p RegBankID and of memory access
866/// size \p OpSize. This returns the variant with the base+unsigned-immediate
867/// addressing mode (e.g., LDRXui).
868/// \returns \p GenericOpc if the combination is unsupported.
869static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
870 unsigned OpSize) {
871 const bool isStore = GenericOpc == TargetOpcode::G_STORE;
872 switch (RegBankID) {
873 case AArch64::GPRRegBankID:
874 switch (OpSize) {
875 case 8:
876 return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
877 case 16:
878 return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
879 case 32:
880 return isStore ? AArch64::STRWui : AArch64::LDRWui;
881 case 64:
882 return isStore ? AArch64::STRXui : AArch64::LDRXui;
883 }
884 break;
885 case AArch64::FPRRegBankID:
886 switch (OpSize) {
887 case 8:
888 return isStore ? AArch64::STRBui : AArch64::LDRBui;
889 case 16:
890 return isStore ? AArch64::STRHui : AArch64::LDRHui;
891 case 32:
892 return isStore ? AArch64::STRSui : AArch64::LDRSui;
893 case 64:
894 return isStore ? AArch64::STRDui : AArch64::LDRDui;
895 case 128:
896 return isStore ? AArch64::STRQui : AArch64::LDRQui;
897 }
898 break;
899 }
900 return GenericOpc;
901}
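//
// Worked examples (a sketch; the value register bank and the memory access size
// pick the base+unsigned-immediate variant):
// \code
//   selectLoadStoreUIOp(TargetOpcode::G_LOAD, AArch64::GPRRegBankID, 32);   // LDRWui
//   selectLoadStoreUIOp(TargetOpcode::G_STORE, AArch64::FPRRegBankID, 128); // STRQui
// \endcode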
902
903/// Helper function for selectCopy. Inserts a subregister copy from \p SrcReg
904/// to \p *To.
905///
906/// E.g "To = COPY SrcReg:SubReg"
907static bool copySubReg(MachineInstr &I, MachineRegisterInfo &MRI,
908 const RegisterBankInfo &RBI, Register SrcReg,
909 const TargetRegisterClass *To, unsigned SubReg) {
910 assert(SrcReg.isValid() && "Expected a valid source register?");
911 assert(To && "Destination register class cannot be null");
912 assert(SubReg && "Expected a valid subregister");
913
914 MachineIRBuilder MIB(I);
915 auto SubRegCopy =
916 MIB.buildInstr(TargetOpcode::COPY, {To}, {}).addReg(SrcReg, 0, SubReg);
917 MachineOperand &RegOp = I.getOperand(1);
918 RegOp.setReg(SubRegCopy.getReg(0));
919
920 // It's possible that the destination register won't be constrained. Make
921 // sure that happens.
922 if (!I.getOperand(0).getReg().isPhysical())
923 RBI.constrainGenericRegister(I.getOperand(0).getReg(), *To, MRI);
924
925 return true;
926}
927
928/// Helper function to get the source and destination register classes for a
929/// copy. Returns a std::pair containing the source register class for the
930/// copy, and the destination register class for the copy. If a register class
931/// cannot be determined, then it will be nullptr.
932static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
933getRegClassesForCopy(MachineInstr &I, const TargetInstrInfo &TII,
934 MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
935 const RegisterBankInfo &RBI) {
936 Register DstReg = I.getOperand(0).getReg();
937 Register SrcReg = I.getOperand(1).getReg();
938 const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
939 const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
940 unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
941 unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
942
943 // Special casing for cross-bank copies of s1s. We can technically represent
944 // a 1-bit value with any size of register. The minimum size for a GPR is 32
945 // bits. So, we need to put the FPR on 32 bits as well.
946 //
947 // FIXME: I'm not sure if this case holds true outside of copies. If it does,
948 // then we can pull it into the helpers that get the appropriate class for a
949 // register bank. Or make a new helper that carries along some constraint
950 // information.
951 if (SrcRegBank != DstRegBank && (DstSize == 1 && SrcSize == 1))
952 SrcSize = DstSize = 32;
953
954 return {getMinClassForRegBank(SrcRegBank, SrcSize, true),
955 getMinClassForRegBank(DstRegBank, DstSize, true)};
956}
957
958// FIXME: We need some sort of API in RBI/TRI to allow generic code to
959// constrain operands of simple instructions given a TargetRegisterClass
960// and LLT
961static bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI,
962 const RegisterBankInfo &RBI) {
963 for (MachineOperand &MO : I.operands()) {
964 if (!MO.isReg())
965 continue;
966 Register Reg = MO.getReg();
967 if (!Reg)
968 continue;
969 if (Reg.isPhysical())
970 continue;
971 LLT Ty = MRI.getType(Reg);
972 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
973 const TargetRegisterClass *RC =
974 RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
975 if (!RC) {
976 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
977 RC = getRegClassForTypeOnBank(Ty, RB);
978 if (!RC) {
979 LLVM_DEBUG(
980     dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");
981 break;
982 }
983 }
984 RBI.constrainGenericRegister(Reg, *RC, MRI);
985 }
986
987 return true;
988}
989
990static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
991 MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
992 const RegisterBankInfo &RBI) {
993 Register DstReg = I.getOperand(0).getReg();
994 Register SrcReg = I.getOperand(1).getReg();
995 const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
996 const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
997
998 // Find the correct register classes for the source and destination registers.
999 const TargetRegisterClass *SrcRC;
1000 const TargetRegisterClass *DstRC;
1001 std::tie(SrcRC, DstRC) = getRegClassesForCopy(I, TII, MRI, TRI, RBI);
1002
1003 if (!DstRC) {
1004 LLVM_DEBUG(dbgs() << "Unexpected dest size "
1005 << RBI.getSizeInBits(DstReg, MRI, TRI) << '\n');
1006 return false;
1007 }
1008
1009 // Is this a copy? If so, then we may need to insert a subregister copy.
1010 if (I.isCopy()) {
1011 // Yes. Check if there's anything to fix up.
1012 if (!SrcRC) {
1013 LLVM_DEBUG(dbgs() << "Couldn't determine source register class\n");
1014 return false;
1015 }
1016
1017 unsigned SrcSize = TRI.getRegSizeInBits(*SrcRC);
1018 unsigned DstSize = TRI.getRegSizeInBits(*DstRC);
1019 unsigned SubReg;
1020
1021 // If the source bank doesn't support a subregister copy small enough,
1022 // then we first need to copy to the destination bank.
1023 if (getMinSizeForRegBank(SrcRegBank) > DstSize) {
1024 const TargetRegisterClass *DstTempRC =
1025 getMinClassForRegBank(DstRegBank, SrcSize, /* GetAllRegSet */ true);
1026 getSubRegForClass(DstRC, TRI, SubReg);
1027
1028 MachineIRBuilder MIB(I);
1029 auto Copy = MIB.buildCopy({DstTempRC}, {SrcReg});
1030 copySubReg(I, MRI, RBI, Copy.getReg(0), DstRC, SubReg);
1031 } else if (SrcSize > DstSize) {
1032 // If the source register is bigger than the destination we need to
1033 // perform a subregister copy.
1034 const TargetRegisterClass *SubRegRC =
1035 getMinClassForRegBank(SrcRegBank, DstSize, /* GetAllRegSet */ true);
1036 getSubRegForClass(SubRegRC, TRI, SubReg);
1037 copySubReg(I, MRI, RBI, SrcReg, DstRC, SubReg);
1038 } else if (DstSize > SrcSize) {
1039 // If the destination register is bigger than the source we need to do
1040 // a promotion using SUBREG_TO_REG.
1041 const TargetRegisterClass *PromotionRC =
1042 getMinClassForRegBank(SrcRegBank, DstSize, /* GetAllRegSet */ true);
1043 getSubRegForClass(SrcRC, TRI, SubReg);
1044
1045 Register PromoteReg = MRI.createVirtualRegister(PromotionRC);
1046 BuildMI(*I.getParent(), I, I.getDebugLoc(),
1047 TII.get(AArch64::SUBREG_TO_REG), PromoteReg)
1048 .addImm(0)
1049 .addUse(SrcReg)
1050 .addImm(SubReg);
1051 MachineOperand &RegOp = I.getOperand(1);
1052 RegOp.setReg(PromoteReg);
1053 }
1054
1055 // If the destination is a physical register, then there's nothing to
1056 // change, so we're done.
1057 if (DstReg.isPhysical())
1058 return true;
1059 }
1060
1061 // No need to constrain SrcReg. It will get constrained when we hit another
1062 // of its uses or defs. Copies do not have constraints.
1063 if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1064 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
1065 << " operand\n");
1066 return false;
1067 }
1068
1069 // If this is a GPR ZEXT, we want to just reduce it down to a copy.
1070 // The sizes will be mismatched with the source < 32b, but that's ok.
1071 if (I.getOpcode() == TargetOpcode::G_ZEXT) {
1072 I.setDesc(TII.get(AArch64::COPY));
1073 assert(SrcRegBank.getID() == AArch64::GPRRegBankID);
1074 return selectCopy(I, TII, MRI, TRI, RBI);
1075 }
1076
1077 I.setDesc(TII.get(AArch64::COPY));
1078 return true;
1079}
1080
1081static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
1082 if (!DstTy.isScalar() || !SrcTy.isScalar())
1083 return GenericOpc;
1084
1085 const unsigned DstSize = DstTy.getSizeInBits();
1086 const unsigned SrcSize = SrcTy.getSizeInBits();
1087
1088 switch (DstSize) {
1089 case 32:
1090 switch (SrcSize) {
1091 case 32:
1092 switch (GenericOpc) {
1093 case TargetOpcode::G_SITOFP:
1094 return AArch64::SCVTFUWSri;
1095 case TargetOpcode::G_UITOFP:
1096 return AArch64::UCVTFUWSri;
1097 case TargetOpcode::G_FPTOSI:
1098 return AArch64::FCVTZSUWSr;
1099 case TargetOpcode::G_FPTOUI:
1100 return AArch64::FCVTZUUWSr;
1101 default:
1102 return GenericOpc;
1103 }
1104 case 64:
1105 switch (GenericOpc) {
1106 case TargetOpcode::G_SITOFP:
1107 return AArch64::SCVTFUXSri;
1108 case TargetOpcode::G_UITOFP:
1109 return AArch64::UCVTFUXSri;
1110 case TargetOpcode::G_FPTOSI:
1111 return AArch64::FCVTZSUWDr;
1112 case TargetOpcode::G_FPTOUI:
1113 return AArch64::FCVTZUUWDr;
1114 default:
1115 return GenericOpc;
1116 }
1117 default:
1118 return GenericOpc;
1119 }
1120 case 64:
1121 switch (SrcSize) {
1122 case 32:
1123 switch (GenericOpc) {
1124 case TargetOpcode::G_SITOFP:
1125 return AArch64::SCVTFUWDri;
1126 case TargetOpcode::G_UITOFP:
1127 return AArch64::UCVTFUWDri;
1128 case TargetOpcode::G_FPTOSI:
1129 return AArch64::FCVTZSUXSr;
1130 case TargetOpcode::G_FPTOUI:
1131 return AArch64::FCVTZUUXSr;
1132 default:
1133 return GenericOpc;
1134 }
1135 case 64:
1136 switch (GenericOpc) {
1137 case TargetOpcode::G_SITOFP:
1138 return AArch64::SCVTFUXDri;
1139 case TargetOpcode::G_UITOFP:
1140 return AArch64::UCVTFUXDri;
1141 case TargetOpcode::G_FPTOSI:
1142 return AArch64::FCVTZSUXDr;
1143 case TargetOpcode::G_FPTOUI:
1144 return AArch64::FCVTZUUXDr;
1145 default:
1146 return GenericOpc;
1147 }
1148 default:
1149 return GenericOpc;
1150 }
1151 default:
1152 return GenericOpc;
1153 };
1154 return GenericOpc;
1155}
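//
// Worked examples (a sketch; the first type is the destination, the second the
// source):
// \code
//   selectFPConvOpc(TargetOpcode::G_SITOFP, LLT::scalar(32), LLT::scalar(64));
//   // == AArch64::SCVTFUXSri (i64 -> f32)
//   selectFPConvOpc(TargetOpcode::G_FPTOUI, LLT::scalar(64), LLT::scalar(32));
//   // == AArch64::FCVTZUUXSr (f32 -> i64)
// \endcode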
1156
1157MachineInstr *
1158AArch64InstructionSelector::emitSelect(Register Dst, Register True,
1159                                        Register False, AArch64CC::CondCode CC,
1160                                        MachineIRBuilder &MIB) const {
1161 MachineRegisterInfo &MRI = *MIB.getMRI();
1162 assert(RBI.getRegBank(False, MRI, TRI)->getID() ==
1163 RBI.getRegBank(True, MRI, TRI)->getID() &&
1164 "Expected both select operands to have the same regbank?");
1165 LLT Ty = MRI.getType(True);
1166 if (Ty.isVector())
1167 return nullptr;
1168 const unsigned Size = Ty.getSizeInBits();
1169 assert((Size == 32 || Size == 64) &&
1170 "Expected 32 bit or 64 bit select only?");
1171 const bool Is32Bit = Size == 32;
1172 if (RBI.getRegBank(True, MRI, TRI)->getID() != AArch64::GPRRegBankID) {
1173 unsigned Opc = Is32Bit ? AArch64::FCSELSrrr : AArch64::FCSELDrrr;
1174 auto FCSel = MIB.buildInstr(Opc, {Dst}, {True, False}).addImm(CC);
1175 constrainSelectedInstRegOperands(*FCSel, TII, TRI, RBI);
1176 return &*FCSel;
1177 }
1178
1179 // By default, we'll try and emit a CSEL.
1180 unsigned Opc = Is32Bit ? AArch64::CSELWr : AArch64::CSELXr;
1181 bool Optimized = false;
1182 auto TryFoldBinOpIntoSelect = [&Opc, Is32Bit, &CC, &MRI,
1183 &Optimized](Register &Reg, Register &OtherReg,
1184 bool Invert) {
1185 if (Optimized)
1186 return false;
1187
1188 // Attempt to fold:
1189 //
1190 // %sub = G_SUB 0, %x
1191 // %select = G_SELECT cc, %reg, %sub
1192 //
1193 // Into:
1194 // %select = CSNEG %reg, %x, cc
1195 Register MatchReg;
1196 if (mi_match(Reg, MRI, m_Neg(m_Reg(MatchReg)))) {
1197 Opc = Is32Bit ? AArch64::CSNEGWr : AArch64::CSNEGXr;
1198 Reg = MatchReg;
1199 if (Invert) {
1200 CC = AArch64CC::getInvertedCondCode(CC);
1201 std::swap(Reg, OtherReg);
1202 }
1203 return true;
1204 }
1205
1206 // Attempt to fold:
1207 //
1208 // %xor = G_XOR %x, -1
1209 // %select = G_SELECT cc, %reg, %xor
1210 //
1211 // Into:
1212 // %select = CSINV %reg, %x, cc
1213 if (mi_match(Reg, MRI, m_Not(m_Reg(MatchReg)))) {
1214 Opc = Is32Bit ? AArch64::CSINVWr : AArch64::CSINVXr;
1215 Reg = MatchReg;
1216 if (Invert) {
1217 CC = AArch64CC::getInvertedCondCode(CC);
1218 std::swap(Reg, OtherReg);
1219 }
1220 return true;
1221 }
1222
1223 // Attempt to fold:
1224 //
1225 // %add = G_ADD %x, 1
1226 // %select = G_SELECT cc, %reg, %add
1227 //
1228 // Into:
1229 // %select = CSINC %reg, %x, cc
1230 if (mi_match(Reg, MRI,
1231 m_any_of(m_GAdd(m_Reg(MatchReg), m_SpecificICst(1)),
1232 m_GPtrAdd(m_Reg(MatchReg), m_SpecificICst(1))))) {
1233 Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr;
1234 Reg = MatchReg;
1235 if (Invert) {
1236 CC = AArch64CC::getInvertedCondCode(CC);
1237 std::swap(Reg, OtherReg);
1238 }
1239 return true;
1240 }
1241
1242 return false;
1243 };
1244
1245 // Helper lambda which tries to use CSINC/CSINV for the instruction when its
1246 // true/false values are constants.
1247 // FIXME: All of these patterns already exist in tablegen. We should be
1248 // able to import these.
1249 auto TryOptSelectCst = [&Opc, &True, &False, &CC, Is32Bit, &MRI,
1250 &Optimized]() {
1251 if (Optimized)
1252 return false;
1253 auto TrueCst = getIConstantVRegValWithLookThrough(True, MRI);
1254 auto FalseCst = getIConstantVRegValWithLookThrough(False, MRI);
1255 if (!TrueCst && !FalseCst)
1256 return false;
1257
1258 Register ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR;
1259 if (TrueCst && FalseCst) {
1260 int64_t T = TrueCst->Value.getSExtValue();
1261 int64_t F = FalseCst->Value.getSExtValue();
1262
1263 if (T == 0 && F == 1) {
1264 // G_SELECT cc, 0, 1 -> CSINC zreg, zreg, cc
1265 Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr;
1266 True = ZReg;
1267 False = ZReg;
1268 return true;
1269 }
1270
1271 if (T == 0 && F == -1) {
1272 // G_SELECT cc 0, -1 -> CSINV zreg, zreg cc
1273 Opc = Is32Bit ? AArch64::CSINVWr : AArch64::CSINVXr;
1274 True = ZReg;
1275 False = ZReg;
1276 return true;
1277 }
1278 }
1279
1280 if (TrueCst) {
1281 int64_t T = TrueCst->Value.getSExtValue();
1282 if (T == 1) {
1283 // G_SELECT cc, 1, f -> CSINC f, zreg, inv_cc
1284 Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr;
1285 True = False;
1286 False = ZReg;
1287 CC = AArch64CC::getInvertedCondCode(CC);
1288 return true;
1289 }
1290
1291 if (T == -1) {
1292 // G_SELECT cc, -1, f -> CSINV f, zreg, inv_cc
1293 Opc = Is32Bit ? AArch64::CSINVWr : AArch64::CSINVXr;
1294 True = False;
1295 False = ZReg;
1296 CC = AArch64CC::getInvertedCondCode(CC);
1297 return true;
1298 }
1299 }
1300
1301 if (FalseCst) {
1302 int64_t F = FalseCst->Value.getSExtValue();
1303 if (F == 1) {
1304 // G_SELECT cc, t, 1 -> CSINC t, zreg, cc
1305 Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr;
1306 False = ZReg;
1307 return true;
1308 }
1309
1310 if (F == -1) {
1311 // G_SELECT cc, t, -1 -> CSINC t, zreg, cc
1312 Opc = Is32Bit ? AArch64::CSINVWr : AArch64::CSINVXr;
1313 False = ZReg;
1314 return true;
1315 }
1316 }
1317 return false;
1318 };
1319
1320 Optimized |= TryFoldBinOpIntoSelect(False, True, /*Invert = */ false);
1321 Optimized |= TryFoldBinOpIntoSelect(True, False, /*Invert = */ true);
1322 Optimized |= TryOptSelectCst();
1323 auto SelectInst = MIB.buildInstr(Opc, {Dst}, {True, False}).addImm(CC);
1324 constrainSelectedInstRegOperands(*SelectInst, TII, TRI, RBI);
1325 return &*SelectInst;
1326}
1327
1328static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
1329 switch (P) {
1330 default:
1331 llvm_unreachable("Unknown condition code!");
1332 case CmpInst::ICMP_NE:
1333 return AArch64CC::NE;
1334 case CmpInst::ICMP_EQ:
1335 return AArch64CC::EQ;
1336 case CmpInst::ICMP_SGT:
1337 return AArch64CC::GT;
1338 case CmpInst::ICMP_SGE:
1339 return AArch64CC::GE;
1340 case CmpInst::ICMP_SLT:
1341 return AArch64CC::LT;
1342 case CmpInst::ICMP_SLE:
1343 return AArch64CC::LE;
1344 case CmpInst::ICMP_UGT:
1345 return AArch64CC::HI;
1346 case CmpInst::ICMP_UGE:
1347 return AArch64CC::HS;
1348 case CmpInst::ICMP_ULT:
1349 return AArch64CC::LO;
1350 case CmpInst::ICMP_ULE:
1351 return AArch64CC::LS;
1352 }
1353}
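//
// For example (a sketch of the mapping above):
// \code
//   changeICMPPredToAArch64CC(CmpInst::ICMP_SGT); // AArch64CC::GT
//   changeICMPPredToAArch64CC(CmpInst::ICMP_ULT); // AArch64CC::LO
// \endcode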
1354
1355/// changeFPCCToORAArch64CC - Convert an IR fp condition code to an AArch64 CC.
1356static void changeFPCCToORAArch64CC(CmpInst::Predicate CC,
1357 AArch64CC::CondCode &CondCode,
1358 AArch64CC::CondCode &CondCode2) {
1359 CondCode2 = AArch64CC::AL;
1360 switch (CC) {
1361 default:
1362 llvm_unreachable("Unknown FP condition!");
1363 case CmpInst::FCMP_OEQ:
1364 CondCode = AArch64CC::EQ;
1365 break;
1366 case CmpInst::FCMP_OGT:
1367 CondCode = AArch64CC::GT;
1368 break;
1369 case CmpInst::FCMP_OGE:
1370 CondCode = AArch64CC::GE;
1371 break;
1372 case CmpInst::FCMP_OLT:
1373 CondCode = AArch64CC::MI;
1374 break;
1375 case CmpInst::FCMP_OLE:
1376 CondCode = AArch64CC::LS;
1377 break;
1378 case CmpInst::FCMP_ONE:
1379 CondCode = AArch64CC::MI;
1380 CondCode2 = AArch64CC::GT;
1381 break;
1382 case CmpInst::FCMP_ORD:
1383 CondCode = AArch64CC::VC;
1384 break;
1385 case CmpInst::FCMP_UNO:
1386 CondCode = AArch64CC::VS;
1387 break;
1388 case CmpInst::FCMP_UEQ:
1389 CondCode = AArch64CC::EQ;
1390 CondCode2 = AArch64CC::VS;
1391 break;
1392 case CmpInst::FCMP_UGT:
1393 CondCode = AArch64CC::HI;
1394 break;
1395 case CmpInst::FCMP_UGE:
1396 CondCode = AArch64CC::PL;
1397 break;
1398 case CmpInst::FCMP_ULT:
1399 CondCode = AArch64CC::LT;
1400 break;
1401 case CmpInst::FCMP_ULE:
1402 CondCode = AArch64CC::LE;
1403 break;
1404 case CmpInst::FCMP_UNE:
1405 CondCode = AArch64CC::NE;
1406 break;
1407 }
1408}
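//
// For example (a sketch): FCMP_ONE has no single AArch64 condition code, so it
// comes back as two codes that the caller must OR together by emitting two
// conditional branches (see selectCompareBranchFedByFCmp below):
// \code
//   AArch64CC::CondCode CC1, CC2;
//   changeFPCCToORAArch64CC(CmpInst::FCMP_ONE, CC1, CC2);
//   // CC1 == AArch64CC::MI, CC2 == AArch64CC::GT
// \endcode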
1409
1410/// Convert an IR fp condition code to an AArch64 CC.
1411/// This differs from changeFPCCToAArch64CC in that it returns cond codes that
1412/// should be AND'ed instead of OR'ed.
1413static void changeFCMPPredToAArch64CC(CmpInst::Predicate CC,
1414 AArch64CC::CondCode &CondCode,
1415 AArch64CC::CondCode &CondCode2) {
1416 CondCode2 = AArch64CC::AL;
1417 switch (CC) {
1418 default:
1419 changeFPCCToORAArch64CC(CC, CondCode, CondCode2);
1420 assert(CondCode2 == AArch64CC::AL);
1421 break;
1422 case CmpInst::FCMP_ONE:
1423 // (a one b)
1424 // == ((a olt b) || (a ogt b))
1425 // == ((a ord b) && (a une b))
1426 CondCode = AArch64CC::VC;
1427 CondCode2 = AArch64CC::NE;
1428 break;
1429 case CmpInst::FCMP_UEQ:
1430 // (a ueq b)
1431 // == ((a uno b) || (a oeq b))
1432 // == ((a ule b) && (a uge b))
1433 CondCode = AArch64CC::PL;
1434 CondCode2 = AArch64CC::LE;
1435 break;
1436 }
1437}
1438
1439/// Return a register which can be used as a bit to test in a TB(N)Z.
1440static Register getTestBitReg(Register Reg, uint64_t &Bit, bool &Invert,
1441                               const MachineRegisterInfo &MRI) {
1442 assert(Reg.isValid() && "Expected valid register!");
1443 bool HasZext = false;
1444 while (MachineInstr *MI = getDefIgnoringCopies(Reg, MRI)) {
1445 unsigned Opc = MI->getOpcode();
1446
1447 if (!MI->getOperand(0).isReg() ||
1448 !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1449 break;
1450
1451 // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits.
1452 //
1453 // (tbz (trunc x), b) -> (tbz x, b) is always safe, because the bit number
1454 // on the truncated x is the same as the bit number on x.
1455 if (Opc == TargetOpcode::G_ANYEXT || Opc == TargetOpcode::G_ZEXT ||
1456 Opc == TargetOpcode::G_TRUNC) {
1457 if (Opc == TargetOpcode::G_ZEXT)
1458 HasZext = true;
1459
1460 Register NextReg = MI->getOperand(1).getReg();
1461 // Did we find something worth folding?
1462 if (!NextReg.isValid() || !MRI.hasOneNonDBGUse(NextReg))
1463 break;
1464
1465 // NextReg is worth folding. Keep looking.
1466 Reg = NextReg;
1467 continue;
1468 }
1469
1470 // Attempt to find a suitable operation with a constant on one side.
1471 std::optional<uint64_t> C;
1472 Register TestReg;
1473 switch (Opc) {
1474 default:
1475 break;
1476 case TargetOpcode::G_AND:
1477 case TargetOpcode::G_XOR: {
1478 TestReg = MI->getOperand(1).getReg();
1479 Register ConstantReg = MI->getOperand(2).getReg();
1480 auto VRegAndVal = getIConstantVRegValWithLookThrough(ConstantReg, MRI);
1481 if (!VRegAndVal) {
1482 // AND commutes, check the other side for a constant.
1483 // FIXME: Can we canonicalize the constant so that it's always on the
1484 // same side at some point earlier?
1485 std::swap(ConstantReg, TestReg);
1486 VRegAndVal = getIConstantVRegValWithLookThrough(ConstantReg, MRI);
1487 }
1488 if (VRegAndVal) {
1489 if (HasZext)
1490 C = VRegAndVal->Value.getZExtValue();
1491 else
1492 C = VRegAndVal->Value.getSExtValue();
1493 }
1494 break;
1495 }
1496 case TargetOpcode::G_ASHR:
1497 case TargetOpcode::G_LSHR:
1498 case TargetOpcode::G_SHL: {
1499 TestReg = MI->getOperand(1).getReg();
1500 auto VRegAndVal =
1501 getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
1502 if (VRegAndVal)
1503 C = VRegAndVal->Value.getSExtValue();
1504 break;
1505 }
1506 }
1507
1508 // Didn't find a constant or viable register. Bail out of the loop.
1509 if (!C || !TestReg.isValid())
1510 break;
1511
1512 // We found a suitable instruction with a constant. Check to see if we can
1513 // walk through the instruction.
1514 Register NextReg;
1515 unsigned TestRegSize = MRI.getType(TestReg).getSizeInBits();
1516 switch (Opc) {
1517 default:
1518 break;
1519 case TargetOpcode::G_AND:
1520 // (tbz (and x, m), b) -> (tbz x, b) when the b-th bit of m is set.
1521 if ((*C >> Bit) & 1)
1522 NextReg = TestReg;
1523 break;
1524 case TargetOpcode::G_SHL:
1525 // (tbz (shl x, c), b) -> (tbz x, b-c) when b-c is positive and fits in
1526 // the type of the register.
1527 if (*C <= Bit && (Bit - *C) < TestRegSize) {
1528 NextReg = TestReg;
1529 Bit = Bit - *C;
1530 }
1531 break;
1532 case TargetOpcode::G_ASHR:
1533 // (tbz (ashr x, c), b) -> (tbz x, b+c) or (tbz x, msb) if b+c is > # bits
1534 // in x
1535 NextReg = TestReg;
1536 Bit = Bit + *C;
1537 if (Bit >= TestRegSize)
1538 Bit = TestRegSize - 1;
1539 break;
1540 case TargetOpcode::G_LSHR:
1541 // (tbz (lshr x, c), b) -> (tbz x, b+c) when b + c is < # bits in x
1542 if ((Bit + *C) < TestRegSize) {
1543 NextReg = TestReg;
1544 Bit = Bit + *C;
1545 }
1546 break;
1547 case TargetOpcode::G_XOR:
1548 // We can walk through a G_XOR by inverting whether we use tbz/tbnz when
1549 // appropriate.
1550 //
1551 // e.g. If x' = xor x, c, and the b-th bit is set in c then
1552 //
1553 // tbz x', b -> tbnz x, b
1554 //
1555 // Because x' only has the b-th bit set if x does not.
1556 if ((*C >> Bit) & 1)
1557 Invert = !Invert;
1558 NextReg = TestReg;
1559 break;
1560 }
1561
1562 // Check if we found anything worth folding.
1563 if (!NextReg.isValid())
1564 return Reg;
1565 Reg = NextReg;
1566 }
1567
1568 return Reg;
1569}
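//
// Illustrative walk (a sketch): combining the rules above, e.g.
//   (tbz (shl x, 3), 5) -> (tbz x, 2)
// because bit 5 of the shifted value is bit 2 of x, provided the G_SHL has no
// other non-debug users.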
1570
1571MachineInstr *AArch64InstructionSelector::emitTestBit(
1572 Register TestReg, uint64_t Bit, bool IsNegative, MachineBasicBlock *DstMBB,
1573 MachineIRBuilder &MIB) const {
1574 assert(TestReg.isValid());
1575 assert(ProduceNonFlagSettingCondBr &&
1576 "Cannot emit TB(N)Z with speculation tracking!");
1577 MachineRegisterInfo &MRI = *MIB.getMRI();
1578
1579 // Attempt to optimize the test bit by walking over instructions.
1580 TestReg = getTestBitReg(TestReg, Bit, IsNegative, MRI);
1581 LLT Ty = MRI.getType(TestReg);
1582 unsigned Size = Ty.getSizeInBits();
1583 assert(!Ty.isVector() && "Expected a scalar!");
1584 assert(Bit < 64 && "Bit is too large!");
1585
1586 // When the test register is a 64-bit register, we have to narrow to make
1587 // TBNZW work.
1588 bool UseWReg = Bit < 32;
1589 unsigned NecessarySize = UseWReg ? 32 : 64;
1590 if (Size != NecessarySize)
1591 TestReg = moveScalarRegClass(
1592 TestReg, UseWReg ? AArch64::GPR32RegClass : AArch64::GPR64RegClass,
1593 MIB);
1594
1595 static const unsigned OpcTable[2][2] = {{AArch64::TBZX, AArch64::TBNZX},
1596 {AArch64::TBZW, AArch64::TBNZW}};
1597 unsigned Opc = OpcTable[UseWReg][IsNegative];
1598 auto TestBitMI =
1599 MIB.buildInstr(Opc).addReg(TestReg).addImm(Bit).addMBB(DstMBB);
1600 constrainSelectedInstRegOperands(*TestBitMI, TII, TRI, RBI);
1601 return &*TestBitMI;
1602}
1603
1604bool AArch64InstructionSelector::tryOptAndIntoCompareBranch(
1605 MachineInstr &AndInst, bool Invert, MachineBasicBlock *DstMBB,
1606 MachineIRBuilder &MIB) const {
1607 assert(AndInst.getOpcode() == TargetOpcode::G_AND && "Expected G_AND only?");
1608 // Given something like this:
1609 //
1610 // %x = ...Something...
1611 // %one = G_CONSTANT i64 1
1612 // %zero = G_CONSTANT i64 0
1613 // %and = G_AND %x, %one
1614 // %cmp = G_ICMP intpred(ne), %and, %zero
1615 // %cmp_trunc = G_TRUNC %cmp
1616 // G_BRCOND %cmp_trunc, %bb.3
1617 //
1618 // We want to try and fold the AND into the G_BRCOND and produce either a
1619 // TBNZ (when we have intpred(ne)) or a TBZ (when we have intpred(eq)).
1620 //
1621 // In this case, we'd get
1622 //
1623 // TBNZ %x %bb.3
1624 //
1625
1626 // Check if the AND has a constant on its RHS which we can use as a mask.
1627 // If it's a power of 2, then it's the same as checking a specific bit.
1628 // (e.g, ANDing with 8 == ANDing with 000...100 == testing if bit 3 is set)
1629 auto MaybeBit = getIConstantVRegValWithLookThrough(
1630 AndInst.getOperand(2).getReg(), *MIB.getMRI());
1631 if (!MaybeBit)
1632 return false;
1633
1634 int32_t Bit = MaybeBit->Value.exactLogBase2();
1635 if (Bit < 0)
1636 return false;
1637
1638 Register TestReg = AndInst.getOperand(1).getReg();
1639
1640 // Emit a TB(N)Z.
1641 emitTestBit(TestReg, Bit, Invert, DstMBB, MIB);
1642 return true;
1643}
1644
1645MachineInstr *AArch64InstructionSelector::emitCBZ(Register CompareReg,
1646 bool IsNegative,
1647 MachineBasicBlock *DestMBB,
1648 MachineIRBuilder &MIB) const {
1649 assert(ProduceNonFlagSettingCondBr && "CBZ does not set flags!");
1650 MachineRegisterInfo &MRI = *MIB.getMRI();
1651 assert(RBI.getRegBank(CompareReg, MRI, TRI)->getID() ==
1652 AArch64::GPRRegBankID &&
1653 "Expected GPRs only?");
1654 auto Ty = MRI.getType(CompareReg);
1655 unsigned Width = Ty.getSizeInBits();
1656 assert(!Ty.isVector() && "Expected scalar only?");
1657 assert(Width <= 64 && "Expected width to be at most 64?");
1658 static const unsigned OpcTable[2][2] = {{AArch64::CBZW, AArch64::CBZX},
1659 {AArch64::CBNZW, AArch64::CBNZX}};
1660 unsigned Opc = OpcTable[IsNegative][Width == 64];
1661 auto BranchMI = MIB.buildInstr(Opc, {}, {CompareReg}).addMBB(DestMBB);
1662 constrainSelectedInstRegOperands(*BranchMI, TII, TRI, RBI);
1663 return &*BranchMI;
1664}
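//
// Usage sketch (CondReg and DestMBB stand for the branch condition register and
// the target block): the register's width and the IsNegative flag pick the
// opcode, e.g. a 64-bit GPR with IsNegative == true yields CBNZX:
// \code
//   emitCBZ(CondReg, /*IsNegative=*/true, DestMBB, MIB);
// \endcode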
1665
1666bool AArch64InstructionSelector::selectCompareBranchFedByFCmp(
1667 MachineInstr &I, MachineInstr &FCmp, MachineIRBuilder &MIB) const {
1668 assert(FCmp.getOpcode() == TargetOpcode::G_FCMP);
1669 assert(I.getOpcode() == TargetOpcode::G_BRCOND);
1670 // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't
1671 // totally clean. Some of them require two branches to implement.
1672 auto Pred = (CmpInst::Predicate)FCmp.getOperand(1).getPredicate();
1673 emitFPCompare(FCmp.getOperand(2).getReg(), FCmp.getOperand(3).getReg(), MIB,
1674 Pred);
1675 AArch64CC::CondCode CC1, CC2;
1676 changeFCMPPredToAArch64CC(static_cast<CmpInst::Predicate>(Pred), CC1, CC2);
1677 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1678 MIB.buildInstr(AArch64::Bcc, {}, {}).addImm(CC1).addMBB(DestMBB);
1679 if (CC2 != AArch64CC::AL)
1680 MIB.buildInstr(AArch64::Bcc, {}, {}).addImm(CC2).addMBB(DestMBB);
1681 I.eraseFromParent();
1682 return true;
1683}
1684
1685bool AArch64InstructionSelector::tryOptCompareBranchFedByICmp(
1686 MachineInstr &I, MachineInstr &ICmp, MachineIRBuilder &MIB) const {
1687 assert(ICmp.getOpcode() == TargetOpcode::G_ICMP);
1688 assert(I.getOpcode() == TargetOpcode::G_BRCOND);
1689 // Attempt to optimize the G_BRCOND + G_ICMP into a TB(N)Z/CB(N)Z.
1690 //
1691 // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
1692 // instructions will not be produced, as they are conditional branch
1693 // instructions that do not set flags.
1694 if (!ProduceNonFlagSettingCondBr)
1695 return false;
1696
1697 MachineRegisterInfo &MRI = *MIB.getMRI();
1698 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1699 auto Pred =
1700 static_cast<CmpInst::Predicate>(ICmp.getOperand(1).getPredicate());
1701 Register LHS = ICmp.getOperand(2).getReg();
1702 Register RHS = ICmp.getOperand(3).getReg();
1703
1704 // We're allowed to emit a TB(N)Z/CB(N)Z. Try to do that.
1705 auto VRegAndVal = getIConstantVRegValWithLookThrough(RHS, MRI);
1706 MachineInstr *AndInst = getOpcodeDef(TargetOpcode::G_AND, LHS, MRI);
1707
1708 // When we can emit a TB(N)Z, prefer that.
1709 //
1710 // Handle non-commutative condition codes first.
1711 // Note that we don't want to do this when we have a G_AND because it can
1712 // become a tst. The tst will make the test bit in the TB(N)Z redundant.
1713 if (VRegAndVal && !AndInst) {
1714 int64_t C = VRegAndVal->Value.getSExtValue();
1715
1716 // When we have a greater-than comparison, we can just test if the msb is
1717 // zero.
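 // For example:
 //   %c = G_ICMP intpred(sgt), %x:(s64), -1
 //   G_BRCOND %c, %bb.3
 // becomes TBZ %x, #63, %bb.3.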
1718 if (C == -1 && Pred == CmpInst::ICMP_SGT) {
1719 uint64_t Bit = MRI.getType(LHS).getSizeInBits() - 1;
1720 emitTestBit(LHS, Bit, /*IsNegative = */ false, DestMBB, MIB);
1721 I.eraseFromParent();
1722 return true;
1723 }
1724
1725 // When we have a less than comparison, we can just test if the msb is not
1726 // zero.
1727 if (C == 0 && Pred == CmpInst::ICMP_SLT) {
1728 uint64_t Bit = MRI.getType(LHS).getSizeInBits() - 1;
1729 emitTestBit(LHS, Bit, /*IsNegative = */ true, DestMBB, MIB);
1730 I.eraseFromParent();
1731 return true;
1732 }
1733
1734 // Inversely, if we have a signed greater-than-or-equal comparison to zero,
1735 // we can test if the msb is zero.
1736 if (C == 0 && Pred == CmpInst::ICMP_SGE) {
1737 uint64_t Bit = MRI.getType(LHS).getSizeInBits() - 1;
1738 emitTestBit(LHS, Bit, /*IsNegative = */ false, DestMBB, MIB);
1739 I.eraseFromParent();
1740 return true;
1741 }
1742 }
1743
1744 // Attempt to handle commutative condition codes. Right now, that's only
1745 // eq/ne.
1746 if (ICmpInst::isEquality(Pred)) {
1747 if (!VRegAndVal) {
1748 std::swap(RHS, LHS);
1749 VRegAndVal = getIConstantVRegValWithLookThrough(RHS, MRI);
1750 AndInst = getOpcodeDef(TargetOpcode::G_AND, LHS, MRI);
1751 }
1752
1753 if (VRegAndVal && VRegAndVal->Value == 0) {
1754 // If there's a G_AND feeding into this branch, try to fold it away by
1755 // emitting a TB(N)Z instead.
1756 //
1757 // Note: If we have LT, then it *is* possible to fold, but it wouldn't be
1758 // beneficial. When we have an AND and LT, we need a TST/ANDS, so folding
1759 // would be redundant.
1760 if (AndInst &&
1761 tryOptAndIntoCompareBranch(
1762 *AndInst, /*Invert = */ Pred == CmpInst::ICMP_NE, DestMBB, MIB)) {
1763 I.eraseFromParent();
1764 return true;
1765 }
1766
1767 // Otherwise, try to emit a CB(N)Z instead.
1768 auto LHSTy = MRI.getType(LHS);
1769 if (!LHSTy.isVector() && LHSTy.getSizeInBits() <= 64) {
1770 emitCBZ(LHS, /*IsNegative = */ Pred == CmpInst::ICMP_NE, DestMBB, MIB);
1771 I.eraseFromParent();
1772 return true;
1773 }
1774 }
1775 }
1776
1777 return false;
1778}
1779
1780bool AArch64InstructionSelector::selectCompareBranchFedByICmp(
1781 MachineInstr &I, MachineInstr &ICmp, MachineIRBuilder &MIB) const {
1782 assert(ICmp.getOpcode() == TargetOpcode::G_ICMP);
1783 assert(I.getOpcode() == TargetOpcode::G_BRCOND);
1784 if (tryOptCompareBranchFedByICmp(I, ICmp, MIB))
1785 return true;
1786
1787 // Couldn't optimize. Emit a compare + a Bcc.
1788 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1789 auto PredOp = ICmp.getOperand(1);
1790 emitIntegerCompare(ICmp.getOperand(2), ICmp.getOperand(3), PredOp, MIB);
1791 const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(
1792 static_cast<CmpInst::Predicate>(PredOp.getPredicate()));
1793 MIB.buildInstr(AArch64::Bcc, {}, {}).addImm(CC).addMBB(DestMBB);
1794 I.eraseFromParent();
1795 return true;
1796}
1797
1798bool AArch64InstructionSelector::selectCompareBranch(
1799 MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) {
1800 Register CondReg = I.getOperand(0).getReg();
1801 MachineInstr *CCMI = MRI.getVRegDef(CondReg);
1802 // Try to select the G_BRCOND using whatever is feeding the condition if
1803 // possible.
1804 unsigned CCMIOpc = CCMI->getOpcode();
1805 if (CCMIOpc == TargetOpcode::G_FCMP)
1806 return selectCompareBranchFedByFCmp(I, *CCMI, MIB);
1807 if (CCMIOpc == TargetOpcode::G_ICMP)
1808 return selectCompareBranchFedByICmp(I, *CCMI, MIB);
1809
1810 // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
1811 // instructions will not be produced, as they are conditional branch
1812 // instructions that do not set flags.
1813 if (ProduceNonFlagSettingCondBr) {
1814 emitTestBit(CondReg, /*Bit = */ 0, /*IsNegative = */ true,
1815 I.getOperand(1).getMBB(), MIB);
1816 I.eraseFromParent();
1817 return true;
1818 }
1819
1820 // Can't emit TB(N)Z/CB(N)Z. Emit a tst + bcc instead.
1821 auto TstMI =
1822 MIB.buildInstr(AArch64::ANDSWri, {LLT::scalar(32)}, {CondReg}).addImm(1);
1823 constrainSelectedInstRegOperands(*TstMI, TII, TRI, RBI);
1824 auto Bcc = MIB.buildInstr(AArch64::Bcc)
1825 .addImm(AArch64CC::NE)
1826 .addMBB(I.getOperand(1).getMBB());
1827 I.eraseFromParent();
1828 return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
1829}
1830
1831/// Returns the element immediate value of a vector shift operand if found.
1832/// This needs to detect a splat-like operation, e.g. a G_BUILD_VECTOR.
1833static std::optional<int64_t> getVectorShiftImm(Register Reg,
1834 MachineRegisterInfo &MRI) {
1835 assert(MRI.getType(Reg).isVector() && "Expected a *vector* shift operand");
1836 MachineInstr *OpMI = MRI.getVRegDef(Reg);
1837 return getAArch64VectorSplatScalar(*OpMI, MRI);
1838}
1839
1840/// Matches and returns the shift immediate value for a SHL instruction given
1841/// a shift operand.
1842static std::optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg,
1843 MachineRegisterInfo &MRI) {
1844 std::optional<int64_t> ShiftImm = getVectorShiftImm(Reg, MRI);
1845 if (!ShiftImm)
1846 return std::nullopt;
1847 // Check the immediate is in range for a SHL.
1848 int64_t Imm = *ShiftImm;
1849 if (Imm < 0)
1850 return std::nullopt;
1851 switch (SrcTy.getElementType().getSizeInBits()) {
1852 default:
1853 LLVM_DEBUG(dbgs() << "Unhandled element type for vector shift");
1854 return std::nullopt;
1855 case 8:
1856 if (Imm > 7)
1857 return std::nullopt;
1858 break;
1859 case 16:
1860 if (Imm > 15)
1861 return std::nullopt;
1862 break;
1863 case 32:
1864 if (Imm > 31)
1865 return std::nullopt;
1866 break;
1867 case 64:
1868 if (Imm > 63)
1869 return std::nullopt;
1870 break;
1871 }
1872 return Imm;
1873}
1874
1875bool AArch64InstructionSelector::selectVectorSHL(MachineInstr &I,
1876 MachineRegisterInfo &MRI) {
1877 assert(I.getOpcode() == TargetOpcode::G_SHL);
1878 Register DstReg = I.getOperand(0).getReg();
1879 const LLT Ty = MRI.getType(DstReg);
1880 Register Src1Reg = I.getOperand(1).getReg();
1881 Register Src2Reg = I.getOperand(2).getReg();
1882
1883 if (!Ty.isVector())
1884 return false;
1885
1886 // Check if we have a vector of constants on RHS that we can select as the
1887 // immediate form.
1888 std::optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI);
1889
1890 unsigned Opc = 0;
1891 if (Ty == LLT::fixed_vector(2, 64)) {
1892 Opc = ImmVal ? AArch64::SHLv2i64_shift : AArch64::USHLv2i64;
1893 } else if (Ty == LLT::fixed_vector(4, 32)) {
1894 Opc = ImmVal ? AArch64::SHLv4i32_shift : AArch64::USHLv4i32;
1895 } else if (Ty == LLT::fixed_vector(2, 32)) {
1896 Opc = ImmVal ? AArch64::SHLv2i32_shift : AArch64::USHLv2i32;
1897 } else if (Ty == LLT::fixed_vector(4, 16)) {
1898 Opc = ImmVal ? AArch64::SHLv4i16_shift : AArch64::USHLv4i16;
1899 } else if (Ty == LLT::fixed_vector(8, 16)) {
1900 Opc = ImmVal ? AArch64::SHLv8i16_shift : AArch64::USHLv8i16;
1901 } else if (Ty == LLT::fixed_vector(16, 8)) {
1902 Opc = ImmVal ? AArch64::SHLv16i8_shift : AArch64::USHLv16i8;
1903 } else if (Ty == LLT::fixed_vector(8, 8)) {
1904 Opc = ImmVal ? AArch64::SHLv8i8_shift : AArch64::USHLv8i8;
1905 } else {
1906 LLVM_DEBUG(dbgs() << "Unhandled G_SHL type");
1907 return false;
1908 }
1909
1910 auto Shl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg});
1911 if (ImmVal)
1912 Shl.addImm(*ImmVal);
1913 else
1914 Shl.addUse(Src2Reg);
1915 constrainSelectedInstRegOperands(*Shl, TII, TRI, RBI);
1916 I.eraseFromParent();
1917 return true;
1918}
1919
1920bool AArch64InstructionSelector::selectVectorAshrLshr(
1921 MachineInstr &I, MachineRegisterInfo &MRI) {
1922 assert(I.getOpcode() == TargetOpcode::G_ASHR ||
1923 I.getOpcode() == TargetOpcode::G_LSHR);
1924 Register DstReg = I.getOperand(0).getReg();
1925 const LLT Ty = MRI.getType(DstReg);
1926 Register Src1Reg = I.getOperand(1).getReg();
1927 Register Src2Reg = I.getOperand(2).getReg();
1928
1929 if (!Ty.isVector())
1930 return false;
1931
1932 bool IsASHR = I.getOpcode() == TargetOpcode::G_ASHR;
1933
1934 // We expect the immediate case to be lowered in the PostLegalCombiner to
1935 // AArch64ISD::VASHR or AArch64ISD::VLSHR equivalents.
1936
1937 // There is no shift-right-by-register instruction, but the shift-left-by-
1938 // register instruction takes a signed shift amount, where a negative amount
1939 // specifies a right shift.
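 // For example, a G_ASHR whose shift amount did not fold to an immediate is
 // selected as a NEG of the shift vector followed by an SSHL.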
1940
1941 unsigned Opc = 0;
1942 unsigned NegOpc = 0;
1943 const TargetRegisterClass *RC =
1944 getRegClassForTypeOnBank(Ty, RBI.getRegBank(AArch64::FPRRegBankID));
1945 if (Ty == LLT::fixed_vector(2, 64)) {
1946 Opc = IsASHR ? AArch64::SSHLv2i64 : AArch64::USHLv2i64;
1947 NegOpc = AArch64::NEGv2i64;
1948 } else if (Ty == LLT::fixed_vector(4, 32)) {
1949 Opc = IsASHR ? AArch64::SSHLv4i32 : AArch64::USHLv4i32;
1950 NegOpc = AArch64::NEGv4i32;
1951 } else if (Ty == LLT::fixed_vector(2, 32)) {
1952 Opc = IsASHR ? AArch64::SSHLv2i32 : AArch64::USHLv2i32;
1953 NegOpc = AArch64::NEGv2i32;
1954 } else if (Ty == LLT::fixed_vector(4, 16)) {
1955 Opc = IsASHR ? AArch64::SSHLv4i16 : AArch64::USHLv4i16;
1956 NegOpc = AArch64::NEGv4i16;
1957 } else if (Ty == LLT::fixed_vector(8, 16)) {
1958 Opc = IsASHR ? AArch64::SSHLv8i16 : AArch64::USHLv8i16;
1959 NegOpc = AArch64::NEGv8i16;
1960 } else if (Ty == LLT::fixed_vector(16, 8)) {
1961 Opc = IsASHR ? AArch64::SSHLv16i8 : AArch64::USHLv16i8;
1962 NegOpc = AArch64::NEGv16i8;
1963 } else if (Ty == LLT::fixed_vector(8, 8)) {
1964 Opc = IsASHR ? AArch64::SSHLv8i8 : AArch64::USHLv8i8;
1965 NegOpc = AArch64::NEGv8i8;
1966 } else {
1967 LLVM_DEBUG(dbgs() << "Unhandled G_ASHR type");
1968 return false;
1969 }
1970
1971 auto Neg = MIB.buildInstr(NegOpc, {RC}, {Src2Reg});
1972 constrainSelectedInstRegOperands(*Neg, TII, TRI, RBI);
1973 auto SShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Neg});
1974 constrainSelectedInstRegOperands(*SShl, TII, TRI, RBI);
1975 I.eraseFromParent();
1976 return true;
1977}
1978
1979bool AArch64InstructionSelector::selectVaStartAAPCS(
1980 MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
1981 return false;
1982}
1983
1984bool AArch64InstructionSelector::selectVaStartDarwin(
1985 MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
1986 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
1987 Register ListReg = I.getOperand(0).getReg();
1988
1989 Register ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
1990
1991 int FrameIdx = FuncInfo->getVarArgsStackIndex();
1992 if (MF.getSubtarget<AArch64Subtarget>().isCallingConvWin64(
1993 MF.getFunction().getCallingConv())) {
1994 FrameIdx = FuncInfo->getVarArgsGPRSize() > 0
1995 ? FuncInfo->getVarArgsGPRIndex()
1996 : FuncInfo->getVarArgsStackIndex();
1997 }
1998
1999 auto MIB =
2000 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
2001 .addDef(ArgsAddrReg)
2002 .addFrameIndex(FrameIdx)
2003 .addImm(0)
2004 .addImm(0);
2005
2006 constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2007
2008 MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
2009 .addUse(ArgsAddrReg)
2010 .addUse(ListReg)
2011 .addImm(0)
2012 .addMemOperand(*I.memoperands_begin());
2013
2014 constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2015 I.eraseFromParent();
2016 return true;
2017}
2018
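/// Materialize a large-code-model address as a MOVZ of bits [15:0] followed by
/// three MOVKs inserting bits [31:16], [47:32] and [63:48], roughly:
///   movz x0, #:abs_g0_nc:sym
///   movk x0, #:abs_g1_nc:sym
///   movk x0, #:abs_g2_nc:sym
///   movk x0, #:abs_g3:sym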
2019void AArch64InstructionSelector::materializeLargeCMVal(
2020 MachineInstr &I, const Value *V, unsigned OpFlags) {
2021 MachineBasicBlock &MBB = *I.getParent();
2022 MachineFunction &MF = *MBB.getParent();
2023 MachineRegisterInfo &MRI = MF.getRegInfo();
2024
2025 auto MovZ = MIB.buildInstr(AArch64::MOVZXi, {&AArch64::GPR64RegClass}, {});
2026 MovZ->addOperand(MF, I.getOperand(1));
2027 MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
2028 AArch64II::MO_NC);
2029 MovZ->addOperand(MF, MachineOperand::CreateImm(0));
2030 constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);
2031
2032 auto BuildMovK = [&](Register SrcReg, unsigned char Flags, unsigned Offset,
2033 Register ForceDstReg) {
2034 Register DstReg = ForceDstReg
2035 ? ForceDstReg
2036 : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
2037 auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);
2038 if (auto *GV = dyn_cast<GlobalValue>(V)) {
2039 MovI->addOperand(MF, MachineOperand::CreateGA(
2040 GV, MovZ->getOperand(1).getOffset(), Flags));
2041 } else {
2042 MovI->addOperand(
2043 MF, MachineOperand::CreateBA(cast<BlockAddress>(V),
2044 MovZ->getOperand(1).getOffset(), Flags));
2045 }
2046 MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
2047 constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
2048 return DstReg;
2049 };
2050 Register DstReg = BuildMovK(MovZ.getReg(0),
2051 AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
2052 DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
2053 BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
2054}
2055
2056bool AArch64InstructionSelector::preISelLower(MachineInstr &I) {
2057 MachineBasicBlock &MBB = *I.getParent();
2058 MachineFunction &MF = *MBB.getParent();
2059 MachineRegisterInfo &MRI = MF.getRegInfo();
2060
2061 switch (I.getOpcode()) {
2062 case TargetOpcode::G_STORE: {
2063 bool Changed = contractCrossBankCopyIntoStore(I, MRI);
2064 MachineOperand &SrcOp = I.getOperand(0);
2065 if (MRI.getType(SrcOp.getReg()).isPointer()) {
2066 // Allow matching with imported patterns for stores of pointers. Unlike
2067 // G_LOAD/G_PTR_ADD, we may not have selected all users. So, emit a copy
2068 // and constrain.
2069 auto Copy = MIB.buildCopy(LLT::scalar(64), SrcOp);
2070 Register NewSrc = Copy.getReg(0);
2071 SrcOp.setReg(NewSrc);
2072 RBI.constrainGenericRegister(NewSrc, AArch64::GPR64RegClass, MRI);
2073 Changed = true;
2074 }
2075 return Changed;
2076 }
2077 case TargetOpcode::G_PTR_ADD:
2078 return convertPtrAddToAdd(I, MRI);
2079 case TargetOpcode::G_LOAD: {
2080 // For scalar loads of pointers, we try to convert the dest type from p0
2081 // to s64 so that our imported patterns can match. Like with the G_PTR_ADD
2082 // conversion, this should be ok because all users should have been
2083 // selected already, so the type doesn't matter for them.
2084 Register DstReg = I.getOperand(0).getReg();
2085 const LLT DstTy = MRI.getType(DstReg);
2086 if (!DstTy.isPointer())
2087 return false;
2088 MRI.setType(DstReg, LLT::scalar(64));
2089 return true;
2090 }
2091 case AArch64::G_DUP: {
2092 // Convert the type from p0 to s64 to help selection.
2093 LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2094 if (!DstTy.isPointerVector())
2095 return false;
2096 auto NewSrc = MIB.buildCopy(LLT::scalar(64), I.getOperand(1).getReg());
2097 MRI.setType(I.getOperand(0).getReg(),
2098 DstTy.changeElementType(LLT::scalar(64)));
2099 MRI.setRegClass(NewSrc.getReg(0), &AArch64::GPR64RegClass);
2100 I.getOperand(1).setReg(NewSrc.getReg(0));
2101 return true;
2102 }
2103 case TargetOpcode::G_UITOFP:
2104 case TargetOpcode::G_SITOFP: {
2105 // If both source and destination regbanks are FPR, then convert the opcode
2106 // to G_SITOF so that the importer can select it to an fpr variant.
2107 // Otherwise, it ends up matching an fpr/gpr variant and adding a cross-bank
2108 // copy.
2109 Register SrcReg = I.getOperand(1).getReg();
2110 LLT SrcTy = MRI.getType(SrcReg);
2111 LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2112 if (SrcTy.isVector() || SrcTy.getSizeInBits() != DstTy.getSizeInBits())
2113 return false;
2114
2115 if (RBI.getRegBank(SrcReg, MRI, TRI)->getID() == AArch64::FPRRegBankID) {
2116 if (I.getOpcode() == TargetOpcode::G_SITOFP)
2117 I.setDesc(TII.get(AArch64::G_SITOF));
2118 else
2119 I.setDesc(TII.get(AArch64::G_UITOF));
2120 return true;
2121 }
2122 return false;
2123 }
2124 default:
2125 return false;
2126 }
2127}
2128
2129/// This lowering tries to look for G_PTR_ADD instructions and then converts
2130/// them to a standard G_ADD with a COPY on the source.
2131///
2132/// The motivation behind this is to expose the add semantics to the imported
2133/// tablegen patterns. We shouldn't need to check for uses being loads/stores,
2134/// because the selector works bottom up, uses before defs. By the time we
2135/// end up trying to select a G_PTR_ADD, we should have already attempted to
2136/// fold this into addressing modes and were therefore unsuccessful.
2137bool AArch64InstructionSelector::convertPtrAddToAdd(
2138 MachineInstr &I, MachineRegisterInfo &MRI) {
2139 assert(I.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
2140 Register DstReg = I.getOperand(0).getReg();
2141 Register AddOp1Reg = I.getOperand(1).getReg();
2142 const LLT PtrTy = MRI.getType(DstReg);
2143 if (PtrTy.getAddressSpace() != 0)
2144 return false;
2145
2146 const LLT CastPtrTy =
2147 PtrTy.isVector() ? LLT::fixed_vector(2, 64) : LLT::scalar(64);
2148 auto PtrToInt = MIB.buildPtrToInt(CastPtrTy, AddOp1Reg);
2149 // Set regbanks on the registers.
2150 if (PtrTy.isVector())
2151 MRI.setRegBank(PtrToInt.getReg(0), RBI.getRegBank(AArch64::FPRRegBankID));
2152 else
2153 MRI.setRegBank(PtrToInt.getReg(0), RBI.getRegBank(AArch64::GPRRegBankID));
2154
2155 // Now turn the %dst(p0) = G_PTR_ADD %base, off into:
2156 // %dst(intty) = G_ADD %intbase, off
2157 I.setDesc(TII.get(TargetOpcode::G_ADD));
2158 MRI.setType(DstReg, CastPtrTy);
2159 I.getOperand(1).setReg(PtrToInt.getReg(0));
2160 if (!select(*PtrToInt)) {
2161 LLVM_DEBUG(dbgs() << "Failed to select G_PTRTOINT in convertPtrAddToAdd");
2162 return false;
2163 }
2164
2165 // Also take the opportunity here to try to do some optimization.
2166 // Try to convert this into a G_SUB if the offset is a 0-x negate idiom.
2167 Register NegatedReg;
2168 if (!mi_match(I.getOperand(2).getReg(), MRI, m_Neg(m_Reg(NegatedReg))))
2169 return true;
2170 I.getOperand(2).setReg(NegatedReg);
2171 I.setDesc(TII.get(TargetOpcode::G_SUB));
2172 return true;
2173}
2174
2175bool AArch64InstructionSelector::earlySelectSHL(MachineInstr &I,
2176 MachineRegisterInfo &MRI) {
2177 // We try to match the immediate variant of LSL, which is actually an alias
2178 // for a special case of UBFM. Otherwise, we fall back to the imported
2179 // selector which will match the register variant.
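 // For example, a 64-bit G_SHL by the constant 3 is selected as
 //   UBFMXri %dst, %src, #61, #60
 // which is the canonical encoding of "lsl %dst, %src, #3".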
2180 assert(I.getOpcode() == TargetOpcode::G_SHL && "unexpected op");
2181 const auto &MO = I.getOperand(2);
2182 auto VRegAndVal = getIConstantVRegVal(MO.getReg(), MRI);
2183 if (!VRegAndVal)
2184 return false;
2185
2186 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2187 if (DstTy.isVector())
2188 return false;
2189 bool Is64Bit = DstTy.getSizeInBits() == 64;
2190 auto Imm1Fn = Is64Bit ? selectShiftA_64(MO) : selectShiftA_32(MO);
2191 auto Imm2Fn = Is64Bit ? selectShiftB_64(MO) : selectShiftB_32(MO);
2192
2193 if (!Imm1Fn || !Imm2Fn)
2194 return false;
2195
2196 auto NewI =
2197 MIB.buildInstr(Is64Bit ? AArch64::UBFMXri : AArch64::UBFMWri,
2198 {I.getOperand(0).getReg()}, {I.getOperand(1).getReg()});
2199
2200 for (auto &RenderFn : *Imm1Fn)
2201 RenderFn(NewI);
2202 for (auto &RenderFn : *Imm2Fn)
2203 RenderFn(NewI);
2204
2205 I.eraseFromParent();
2206 return constrainSelectedInstRegOperands(*NewI, TII, TRI, RBI);
2207}
2208
2209bool AArch64InstructionSelector::contractCrossBankCopyIntoStore(
2210 MachineInstr &I, MachineRegisterInfo &MRI) {
2211 assert(I.getOpcode() == TargetOpcode::G_STORE && "Expected G_STORE");
2212 // If we're storing a scalar, it doesn't matter what register bank that
2213 // scalar is on. All that matters is the size.
2214 //
2215 // So, if we see something like this (with a 32-bit scalar as an example):
2216 //
2217 // %x:gpr(s32) = ... something ...
2218 // %y:fpr(s32) = COPY %x:gpr(s32)
2219 // G_STORE %y:fpr(s32)
2220 //
2221 // We can fix this up into something like this:
2222 //
2223 // G_STORE %x:gpr(s32)
2224 //
2225 // And then continue the selection process normally.
2226 Register DefDstReg = getSrcRegIgnoringCopies(I.getOperand(0).getReg(), MRI);
2227 if (!DefDstReg.isValid())
2228 return false;
2229 LLT DefDstTy = MRI.getType(DefDstReg);
2230 Register StoreSrcReg = I.getOperand(0).getReg();
2231 LLT StoreSrcTy = MRI.getType(StoreSrcReg);
2232
2233 // If we get something strange like a physical register, then we shouldn't
2234 // go any further.
2235 if (!DefDstTy.isValid())
2236 return false;
2237
2238 // Are the source and dst types the same size?
2239 if (DefDstTy.getSizeInBits() != StoreSrcTy.getSizeInBits())
2240 return false;
2241
2242 if (RBI.getRegBank(StoreSrcReg, MRI, TRI) ==
2243 RBI.getRegBank(DefDstReg, MRI, TRI))
2244 return false;
2245
2246 // We have a cross-bank copy, which is entering a store. Let's fold it.
2247 I.getOperand(0).setReg(DefDstReg);
2248 return true;
2249}
2250
2251bool AArch64InstructionSelector::earlySelect(MachineInstr &I) {
2252 assert(I.getParent() && "Instruction should be in a basic block!");
2253 assert(I.getParent()->getParent() && "Instruction should be in a function!");
2254
2255 MachineBasicBlock &MBB = *I.getParent();
2256 MachineFunction &MF = *MBB.getParent();
2257 MachineRegisterInfo &MRI = MF.getRegInfo();
2258
2259 switch (I.getOpcode()) {
2260 case AArch64::G_DUP: {
2261 // Before selecting a DUP instruction, check if it is better selected as a
2262 // MOV or load from a constant pool.
2263 Register Src = I.getOperand(1).getReg();
2264 auto ValAndVReg = getAnyConstantVRegValWithLookThrough(Src, MRI);
2265 if (!ValAndVReg)
2266 return false;
2267 LLVMContext &Ctx = MF.getFunction().getContext();
2268 Register Dst = I.getOperand(0).getReg();
2269 Constant *CV = ConstantDataVector::getSplat(
2270 MRI.getType(Dst).getNumElements(),
2271 ConstantInt::get(Type::getIntNTy(Ctx, MRI.getType(Src).getSizeInBits()),
2272 ValAndVReg->Value));
2273 if (!emitConstantVector(Dst, CV, MIB, MRI))
2274 return false;
2275 I.eraseFromParent();
2276 return true;
2277 }
2278 case TargetOpcode::G_SEXT:
2279 // Check for i64 sext(i32 vector_extract) prior to tablegen to select SMOV
2280 // over a normal extend.
2281 if (selectUSMovFromExtend(I, MRI))
2282 return true;
2283 return false;
2284 case TargetOpcode::G_BR:
2285 return false;
2286 case TargetOpcode::G_SHL:
2287 return earlySelectSHL(I, MRI);
2288 case TargetOpcode::G_CONSTANT: {
2289 bool IsZero = false;
2290 if (I.getOperand(1).isCImm())
2291 IsZero = I.getOperand(1).getCImm()->isZero();
2292 else if (I.getOperand(1).isImm())
2293 IsZero = I.getOperand(1).getImm() == 0;
2294
2295 if (!IsZero)
2296 return false;
2297
2298 Register DefReg = I.getOperand(0).getReg();
2299 LLT Ty = MRI.getType(DefReg);
2300 if (Ty.getSizeInBits() == 64) {
2301 I.getOperand(1).ChangeToRegister(AArch64::XZR, false);
2302 RBI.constrainGenericRegister(DefReg, AArch64::GPR64RegClass, MRI);
2303 } else if (Ty.getSizeInBits() == 32) {
2304 I.getOperand(1).ChangeToRegister(AArch64::WZR, false);
2305 RBI.constrainGenericRegister(DefReg, AArch64::GPR32RegClass, MRI);
2306 } else
2307 return false;
2308
2309 I.setDesc(TII.get(TargetOpcode::COPY));
2310 return true;
2311 }
2312
2313 case TargetOpcode::G_ADD: {
2314 // Check if this is being fed by a G_ICMP on either side.
2315 //
2316 // (cmp pred, x, y) + z
2317 //
2318 // In the above case, when the cmp is true, we increment z by 1. So, we can
2319 // fold the add into the cset for the cmp by using cinc.
2320 //
2321 // FIXME: This would probably be a lot nicer in PostLegalizerLowering.
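 // For example:
 //   %cmp = G_ICMP intpred(eq), %x, %y
 //   %add = G_ADD %z, %cmp
 // becomes a flag-setting compare followed by CSINC %add, %z, %z, ne,
 // i.e. %z + 1 when %x == %y and %z otherwise.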
2322 Register AddDst = I.getOperand(0).getReg();
2323 Register AddLHS = I.getOperand(1).getReg();
2324 Register AddRHS = I.getOperand(2).getReg();
2325 // Only handle scalars.
2326 LLT Ty = MRI.getType(AddLHS);
2327 if (Ty.isVector())
2328 return false;
2329 // Since G_ICMP is modeled as ADDS/SUBS/ANDS, we can handle 32 bits or 64
2330 // bits.
2331 unsigned Size = Ty.getSizeInBits();
2332 if (Size != 32 && Size != 64)
2333 return false;
2334 auto MatchCmp = [&](Register Reg) -> MachineInstr * {
2335 if (!MRI.hasOneNonDBGUse(Reg))
2336 return nullptr;
2337 // If the LHS of the add is 32 bits, then we want to fold a 32-bit
2338 // compare.
2339 if (Size == 32)
2340 return getOpcodeDef(TargetOpcode::G_ICMP, Reg, MRI);
2341 // We model scalar compares using 32-bit destinations right now.
2342 // If it's a 64-bit compare, it'll have 64-bit sources.
2343 Register ZExt;
2344 if (!mi_match(Reg, MRI,
2345 m_OneNonDBGUse(m_GZExt(m_OneNonDBGUse(m_Reg(ZExt))))))
2346 return nullptr;
2347 auto *Cmp = getOpcodeDef(TargetOpcode::G_ICMP, ZExt, MRI);
2348 if (!Cmp ||
2349 MRI.getType(Cmp->getOperand(2).getReg()).getSizeInBits() != 64)
2350 return nullptr;
2351 return Cmp;
2352 };
2353 // Try to match
2354 // z + (cmp pred, x, y)
2355 MachineInstr *Cmp = MatchCmp(AddRHS);
2356 if (!Cmp) {
2357 // (cmp pred, x, y) + z
2358 std::swap(AddLHS, AddRHS);
2359 Cmp = MatchCmp(AddRHS);
2360 if (!Cmp)
2361 return false;
2362 }
2363 auto &PredOp = Cmp->getOperand(1);
2364 auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
2365 const AArch64CC::CondCode InvCC =
2366 changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
2367 MIB.setInstrAndDebugLoc(I);
2368 emitIntegerCompare(/*LHS=*/Cmp->getOperand(2),
2369 /*RHS=*/Cmp->getOperand(3), PredOp, MIB);
2370 emitCSINC(/*Dst=*/AddDst, /*Src =*/AddLHS, /*Src2=*/AddLHS, InvCC, MIB);
2371 I.eraseFromParent();
2372 return true;
2373 }
2374 case TargetOpcode::G_OR: {
2375 // Look for operations that take the lower `Width=Size-ShiftImm` bits of
2376 // `ShiftSrc` and insert them into the upper `Width` bits of `MaskSrc` via
2377 // shifting and masking that we can replace with a BFI (encoded as a BFM).
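 // For example, with a 32-bit type:
 //   %or = G_OR (G_SHL %hi, 16), (G_AND %lo, 0xffff)
 // is selected as BFMWri %or, %lo, %hi, #16, #15, i.e. a "bfi" that keeps the
 // low 16 bits of %lo and inserts the low 16 bits of %hi above them.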
2378 Register Dst = I.getOperand(0).getReg();
2379 LLT Ty = MRI.getType(Dst);
2380
2381 if (!Ty.isScalar())
2382 return false;
2383
2384 unsigned Size = Ty.getSizeInBits();
2385 if (Size != 32 && Size != 64)
2386 return false;
2387
2388 Register ShiftSrc;
2389 int64_t ShiftImm;
2390 Register MaskSrc;
2391 int64_t MaskImm;
2392 if (!mi_match(
2393 Dst, MRI,
2394 m_GOr(m_OneNonDBGUse(m_GShl(m_Reg(ShiftSrc), m_ICst(ShiftImm))),
2395 m_OneNonDBGUse(m_GAnd(m_Reg(MaskSrc), m_ICst(MaskImm))))))
2396 return false;
2397
2398 if (ShiftImm > Size || ((1ULL << ShiftImm) - 1ULL) != uint64_t(MaskImm))
2399 return false;
2400
2401 int64_t Immr = Size - ShiftImm;
2402 int64_t Imms = Size - ShiftImm - 1;
2403 unsigned Opc = Size == 32 ? AArch64::BFMWri : AArch64::BFMXri;
2404 emitInstr(Opc, {Dst}, {MaskSrc, ShiftSrc, Immr, Imms}, MIB);
2405 I.eraseFromParent();
2406 return true;
2407 }
2408 case TargetOpcode::G_FENCE: {
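 // A single-thread fence (scope operand == 0) only needs a compiler barrier,
 // so a MEMBARRIER pseudo suffices. Otherwise emit a DMB: an acquire-only
 // fence (ordering == AtomicOrdering::Acquire, i.e. 4) can use the weaker
 // DMB ISHLD (CRm = 0x9), while anything stronger uses DMB ISH (CRm = 0xb).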
2409 if (I.getOperand(1).getImm() == 0)
2410 BuildMI(MBB, I, MIMetadata(I), TII.get(TargetOpcode::MEMBARRIER));
2411 else
2412 BuildMI(MBB, I, MIMetadata(I), TII.get(AArch64::DMB))
2413 .addImm(I.getOperand(0).getImm() == 4 ? 0x9 : 0xb);
2414 I.eraseFromParent();
2415 return true;
2416 }
2417 default:
2418 return false;
2419 }
2420}
2421
2422bool AArch64InstructionSelector::select(MachineInstr &I) {
2423 assert(I.getParent() && "Instruction should be in a basic block!");
2424 assert(I.getParent()->getParent() && "Instruction should be in a function!");
2425
2426 MachineBasicBlock &MBB = *I.getParent();
2427 MachineFunction &MF = *MBB.getParent();
2428 MachineRegisterInfo &MRI = MF.getRegInfo();
2429 const AArch64Subtarget *Subtarget = &MF.getSubtarget<AArch64Subtarget>();
2430 const AArch64Subtarget *Subtarget = &MF.getSubtarget<AArch64Subtarget>();
2431 if (Subtarget->requiresStrictAlign()) {
2432 // We don't support this feature yet.
2433 LLVM_DEBUG(dbgs() << "AArch64 GISel does not support strict-align yet\n");
2434 return false;
2435 }
2436
2437 MIB.setInstrAndDebugLoc(I);
2438
2439 unsigned Opcode = I.getOpcode();
2440 // G_PHI requires same handling as PHI
2441 if (!I.isPreISelOpcode() || Opcode == TargetOpcode::G_PHI) {
2442 // Certain non-generic instructions also need some special handling.
2443
2444 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
2445 return selectImpl(I, *CoverageInfo);
2446
2447 if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
2448 const Register DefReg = I.getOperand(0).getReg();
2449 const LLT DefTy = MRI.getType(DefReg);
2450
2451 const RegClassOrRegBank &RegClassOrBank =
2452 MRI.getRegClassOrRegBank(DefReg);
2453
2454 const TargetRegisterClass *DefRC
2455 = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
2456 if (!DefRC) {
2457 if (!DefTy.isValid()) {
2458 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
2459 return false;
2460 }
2461 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
2462 DefRC = getRegClassForTypeOnBank(DefTy, RB);
2463 if (!DefRC) {
2464 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
2465 return false;
2466 }
2467 }
2468
2469 I.setDesc(TII.get(TargetOpcode::PHI));
2470
2471 return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
2472 }
2473
2474 if (I.isCopy())
2475 return selectCopy(I, TII, MRI, TRI, RBI);
2476
2477 if (I.isDebugInstr())
2478 return selectDebugInstr(I, MRI, RBI);
2479
2480 return true;
2481 }
2482
2483
2484 if (I.getNumOperands() != I.getNumExplicitOperands()) {
2485 LLVM_DEBUG(
2486 dbgs() << "Generic instruction has unexpected implicit operands\n");
2487 return false;
2488 }
2489
2490 // Try to do some lowering before we start instruction selecting. These
2491 // lowerings are purely transformations on the input G_MIR and so selection
2492 // must continue after any modification of the instruction.
2493 if (preISelLower(I)) {
2494 Opcode = I.getOpcode(); // The opcode may have been modified, refresh it.
2495 }
2496
2497 // There may be patterns where the importer can't deal with them optimally,
2498 // but does select it to a suboptimal sequence so our custom C++ selection
2499 // code later never has a chance to work on it. Therefore, we have an early
2500 // selection attempt here to give priority to certain selection routines
2501 // over the imported ones.
2502 if (earlySelect(I))
2503 return true;
2504
2505 if (selectImpl(I, *CoverageInfo))
2506 return true;
2507
2508 LLT Ty =
2509 I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};
2510
2511 switch (Opcode) {
2512 case TargetOpcode::G_SBFX:
2513 case TargetOpcode::G_UBFX: {
2514 static const unsigned OpcTable[2][2] = {
2515 {AArch64::UBFMWri, AArch64::UBFMXri},
2516 {AArch64::SBFMWri, AArch64::SBFMXri}};
2517 bool IsSigned = Opcode == TargetOpcode::G_SBFX;
2518 unsigned Size = Ty.getSizeInBits();
2519 unsigned Opc = OpcTable[IsSigned][Size == 64];
2520 auto Cst1 =
2521 getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), MRI);
2522 assert(Cst1 && "Should have gotten a constant for src 1?");
2523 auto Cst2 =
2524 getIConstantVRegValWithLookThrough(I.getOperand(3).getReg(), MRI);
2525 assert(Cst2 && "Should have gotten a constant for src 2?");
2526 auto LSB = Cst1->Value.getZExtValue();
2527 auto Width = Cst2->Value.getZExtValue();
2528 auto BitfieldInst =
2529 MIB.buildInstr(Opc, {I.getOperand(0)}, {I.getOperand(1)})
2530 .addImm(LSB)
2531 .addImm(LSB + Width - 1);
2532 I.eraseFromParent();
2533 return constrainSelectedInstRegOperands(*BitfieldInst, TII, TRI, RBI);
2534 }
2535 case TargetOpcode::G_BRCOND:
2536 return selectCompareBranch(I, MF, MRI);
2537
2538 case TargetOpcode::G_BRINDIRECT: {
2539 I.setDesc(TII.get(AArch64::BR));
2540 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2541 }
2542
2543 case TargetOpcode::G_BRJT:
2544 return selectBrJT(I, MRI);
2545
2546 case AArch64::G_ADD_LOW: {
2547 // This op may have been separated from its ADRP companion by the localizer
2548 // or some other code motion pass. Given that many CPUs will try to
2549 // macro fuse these operations anyway, select this into a MOVaddr pseudo
2550 // which will later be expanded into an ADRP+ADD pair after scheduling.
2551 MachineInstr *BaseMI = MRI.getVRegDef(I.getOperand(1).getReg());
2552 if (BaseMI->getOpcode() != AArch64::ADRP) {
2553 I.setDesc(TII.get(AArch64::ADDXri));
2554 I.addOperand(MachineOperand::CreateImm(0));
2555 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2556 }
2557 assert(TM.getCodeModel() == CodeModel::Small &&
2558 "Expected small code model");
2559 auto Op1 = BaseMI->getOperand(1);
2560 auto Op2 = I.getOperand(2);
2561 auto MovAddr = MIB.buildInstr(AArch64::MOVaddr, {I.getOperand(0)}, {})
2562 .addGlobalAddress(Op1.getGlobal(), Op1.getOffset(),
2563 Op1.getTargetFlags())
2564 .addGlobalAddress(Op2.getGlobal(), Op2.getOffset(),
2565 Op2.getTargetFlags());
2566 I.eraseFromParent();
2567 return constrainSelectedInstRegOperands(*MovAddr, TII, TRI, RBI);
2568 }
2569
2570 case TargetOpcode::G_FCONSTANT:
2571 case TargetOpcode::G_CONSTANT: {
2572 const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;
2573
2574 const LLT s8 = LLT::scalar(8);
2575 const LLT s16 = LLT::scalar(16);
2576 const LLT s32 = LLT::scalar(32);
2577 const LLT s64 = LLT::scalar(64);
2578 const LLT s128 = LLT::scalar(128);
2579 const LLT p0 = LLT::pointer(0, 64);
2580
2581 const Register DefReg = I.getOperand(0).getReg();
2582 const LLT DefTy = MRI.getType(DefReg);
2583 const unsigned DefSize = DefTy.getSizeInBits();
2584 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
2585
2586 // FIXME: Redundant check, but even less readable when factored out.
2587 if (isFP) {
2588 if (Ty != s16 && Ty != s32 && Ty != s64 && Ty != s128) {
2589 LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
2590 << " constant, expected: " << s16 << " or " << s32
2591 << " or " << s64 << " or " << s128 << '\n');
2592 return false;
2593 }
2594
2595 if (RB.getID() != AArch64::FPRRegBankID) {
2596 LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
2597 << " constant on bank: " << RB
2598 << ", expected: FPR\n");
2599 return false;
2600 }
2601
2602 // The case when we have 0.0 is covered by tablegen. Reject it here so we
2603 // can be sure tablegen works correctly and isn't rescued by this code.
2604 // 0.0 is not covered by tablegen for FP128. So we will handle this
2605 // scenario in the code here.
2606 if (DefSize != 128 && I.getOperand(1).getFPImm()->isExactlyValue(0.0))
2607 return false;
2608 } else {
2609 // s32 and s64 are covered by tablegen.
2610 if (Ty != p0 && Ty != s8 && Ty != s16) {
2611 LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
2612 << " constant, expected: " << s32 << ", " << s64
2613 << ", or " << p0 << '\n');
2614 return false;
2615 }
2616
2617 if (RB.getID() != AArch64::GPRRegBankID) {
2618 LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
2619 << " constant on bank: " << RB
2620 << ", expected: GPR\n");
2621 return false;
2622 }
2623 }
2624
2625 if (isFP) {
2626 const TargetRegisterClass &FPRRC = *getRegClassForTypeOnBank(DefTy, RB);
2627 // For 16, 64, and 128b values, emit a constant pool load.
2628 switch (DefSize) {
2629 default:
2630 llvm_unreachable("Unexpected destination size for G_FCONSTANT?");
2631 case 32:
2632 case 64: {
2633 bool OptForSize = shouldOptForSize(&MF);
2634 const auto &TLI = MF.getSubtarget().getTargetLowering();
2635 // If TLI says that this fpimm is illegal, then we'll expand to a
2636 // constant pool load.
2637 if (TLI->isFPImmLegal(I.getOperand(1).getFPImm()->getValueAPF(),
2638 EVT::getFloatingPointVT(DefSize), OptForSize))
2639 break;
2640 [[fallthrough]];
2641 }
2642 case 16:
2643 case 128: {
2644 auto *FPImm = I.getOperand(1).getFPImm();
2645 auto *LoadMI = emitLoadFromConstantPool(FPImm, MIB);
2646 if (!LoadMI) {
2647 LLVM_DEBUG(dbgs() << "Failed to load double constant pool entry\n");
2648 return false;
2649 }
2650 MIB.buildCopy({DefReg}, {LoadMI->getOperand(0).getReg()});
2651 I.eraseFromParent();
2652 return RBI.constrainGenericRegister(DefReg, FPRRC, MRI);
2653 }
2654 }
2655
2656 assert((DefSize == 32 || DefSize == 64) && "Unexpected const def size");
2657 // Either emit a FMOV, or emit a copy to emit a normal mov.
2658 const Register DefGPRReg = MRI.createVirtualRegister(
2659 DefSize == 32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
2660 MachineOperand &RegOp = I.getOperand(0);
2661 RegOp.setReg(DefGPRReg);
2662 MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
2663 MIB.buildCopy({DefReg}, {DefGPRReg});
2664
2665 if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
2666 LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
2667 return false;
2668 }
2669
2670 MachineOperand &ImmOp = I.getOperand(1);
2671 // FIXME: Is going through int64_t always correct?
2672 ImmOp.ChangeToImmediate(
2673 ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
2674 } else if (I.getOperand(1).isCImm()) {
2675 uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
2676 I.getOperand(1).ChangeToImmediate(Val);
2677 } else if (I.getOperand(1).isImm()) {
2678 uint64_t Val = I.getOperand(1).getImm();
2679 I.getOperand(1).ChangeToImmediate(Val);
2680 }
2681
2682 const unsigned MovOpc =
2683 DefSize == 64 ? AArch64::MOVi64imm : AArch64::MOVi32imm;
2684 I.setDesc(TII.get(MovOpc));
2685 I.addImplicitDefUseOperands(MF);
2686 return true;
2687 }
2688 case TargetOpcode::G_EXTRACT: {
2689 Register DstReg = I.getOperand(0).getReg();
2690 Register SrcReg = I.getOperand(1).getReg();
2691 LLT SrcTy = MRI.getType(SrcReg);
2692 LLT DstTy = MRI.getType(DstReg);
2693 (void)DstTy;
2694 unsigned SrcSize = SrcTy.getSizeInBits();
2695
2696 if (SrcTy.getSizeInBits() > 64) {
2697 // This should be an extract of an s128, which is like a vector extract.
2698 if (SrcTy.getSizeInBits() != 128)
2699 return false;
2700 // Only support extracting 64 bits from an s128 at the moment.
2701 if (DstTy.getSizeInBits() != 64)
2702 return false;
2703
2704 unsigned Offset = I.getOperand(2).getImm();
2705 if (Offset % 64 != 0)
2706 return false;
2707
2708 // Check we have the right regbank always.
2709 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
2710 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
2711 assert(SrcRB.getID() == DstRB.getID() && "Wrong extract regbank!");
2712
2713 if (SrcRB.getID() == AArch64::GPRRegBankID) {
2714 auto NewI =
2715 MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {})
2716 .addUse(SrcReg, 0,
2717 Offset == 0 ? AArch64::sube64 : AArch64::subo64);
2718 constrainOperandRegClass(MF, TRI, MRI, TII, RBI, *NewI,
2719 AArch64::GPR64RegClass, NewI->getOperand(0));
2720 I.eraseFromParent();
2721 return true;
2722 }
2723
2724 // Emit the same code as a vector extract.
2725 // Offset must be a multiple of 64.
2726 unsigned LaneIdx = Offset / 64;
2727 MachineInstr *Extract = emitExtractVectorElt(
2728 DstReg, DstRB, LLT::scalar(64), SrcReg, LaneIdx, MIB);
2729 if (!Extract)
2730 return false;
2731 I.eraseFromParent();
2732 return true;
2733 }
2734
2735 I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
2736 MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
2737 Ty.getSizeInBits() - 1);
2738
2739 if (SrcSize < 64) {
2740 assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
2741 "unexpected G_EXTRACT types");
2743 }
2744
2745 DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
2746 MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
2747 MIB.buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
2748 .addReg(DstReg, 0, AArch64::sub_32);
2749 RBI.constrainGenericRegister(I.getOperand(0).getReg(),
2750 AArch64::GPR32RegClass, MRI);
2751 I.getOperand(0).setReg(DstReg);
2752
2753 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2754 }
2755
2756 case TargetOpcode::G_INSERT: {
2757 LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
2758 LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2759 unsigned DstSize = DstTy.getSizeInBits();
2760 // Larger inserts are vectors, same-size ones should be something else by
2761 // now (split up or turned into COPYs).
2762 if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
2763 return false;
2764
2765 I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
2766 unsigned LSB = I.getOperand(3).getImm();
2767 unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
2768 I.getOperand(3).setImm((DstSize - LSB) % DstSize);
2769 MachineInstrBuilder(MF, I).addImm(Width - 1);
2770
2771 if (DstSize < 64) {
2772 assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
2773 "unexpected G_INSERT types");
2775 }
2776
2777 Register SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
2778 BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
2779 TII.get(AArch64::SUBREG_TO_REG))
2780 .addDef(SrcReg)
2781 .addImm(0)
2782 .addUse(I.getOperand(2).getReg())
2783 .addImm(AArch64::sub_32);
2784 RBI.constrainGenericRegister(I.getOperand(2).getReg(),
2785 AArch64::GPR32RegClass, MRI);
2786 I.getOperand(2).setReg(SrcReg);
2787
2788 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2789 }
2790 case TargetOpcode::G_FRAME_INDEX: {
2791 // allocas and G_FRAME_INDEX are only supported in addrspace(0).
2792 if (Ty != LLT::pointer(0, 64)) {
2793 LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
2794 << ", expected: " << LLT::pointer(0, 64) << '\n');
2795 return false;
2796 }
2797 I.setDesc(TII.get(AArch64::ADDXri));
2798
2799 // MOs for a #0 shifted immediate.
2800 I.addOperand(MachineOperand::CreateImm(0));
2801 I.addOperand(MachineOperand::CreateImm(0));
2802
2804 }
2805
2806 case TargetOpcode::G_GLOBAL_VALUE: {
2807 auto GV = I.getOperand(1).getGlobal();
2808 if (GV->isThreadLocal())
2809 return selectTLSGlobalValue(I, MRI);
2810
2811 unsigned OpFlags = STI.ClassifyGlobalReference(GV, TM);
2812 if (OpFlags & AArch64II::MO_GOT) {
2813 I.setDesc(TII.get(AArch64::LOADgot));
2814 I.getOperand(1).setTargetFlags(OpFlags);
2815 } else if (TM.getCodeModel() == CodeModel::Large &&
2816 !TM.isPositionIndependent()) {
2817 // Materialize the global using movz/movk instructions.
2818 materializeLargeCMVal(I, GV, OpFlags);
2819 I.eraseFromParent();
2820 return true;
2821 } else if (TM.getCodeModel() == CodeModel::Tiny) {
2822 I.setDesc(TII.get(AArch64::ADR));
2823 I.getOperand(1).setTargetFlags(OpFlags);
2824 } else {
2825 I.setDesc(TII.get(AArch64::MOVaddr));
2826 I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
2827 MachineInstrBuilder MIB(MF, I);
2828 MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
2829 OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2830 }
2831 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2832 }
2833
2834 case TargetOpcode::G_ZEXTLOAD:
2835 case TargetOpcode::G_LOAD:
2836 case TargetOpcode::G_STORE: {
2837 GLoadStore &LdSt = cast<GLoadStore>(I);
2838 bool IsZExtLoad = I.getOpcode() == TargetOpcode::G_ZEXTLOAD;
2839 LLT PtrTy = MRI.getType(LdSt.getPointerReg());
2840
2841 if (PtrTy != LLT::pointer(0, 64)) {
2842 LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
2843 << ", expected: " << LLT::pointer(0, 64) << '\n');
2844 return false;
2845 }
2846
2847 uint64_t MemSizeInBytes = LdSt.getMemSize();
2848 unsigned MemSizeInBits = LdSt.getMemSizeInBits();
2849 AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
2850
2851 // Need special instructions for atomics that affect ordering.
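 // For example, an acquire load selects LDAR (or LDAPR when RCPC is available
 // and the ordering is weaker than seq_cst), and a release or seq_cst store
 // selects STLR.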
2852 if (Order != AtomicOrdering::NotAtomic &&
2853 Order != AtomicOrdering::Unordered &&
2854 Order != AtomicOrdering::Monotonic) {
2855 assert(!isa<GZExtLoad>(LdSt));
2856 if (MemSizeInBytes > 64)
2857 return false;
2858
2859 if (isa<GLoad>(LdSt)) {
2860 static constexpr unsigned LDAPROpcodes[] = {
2861 AArch64::LDAPRB, AArch64::LDAPRH, AArch64::LDAPRW, AArch64::LDAPRX};
2862 static constexpr unsigned LDAROpcodes[] = {
2863 AArch64::LDARB, AArch64::LDARH, AArch64::LDARW, AArch64::LDARX};
2864 ArrayRef<unsigned> Opcodes =
2865 STI.hasRCPC() && Order != AtomicOrdering::SequentiallyConsistent
2866 ? LDAPROpcodes
2867 : LDAROpcodes;
2868 I.setDesc(TII.get(Opcodes[Log2_32(MemSizeInBytes)]));
2869 } else {
2870 static constexpr unsigned Opcodes[] = {AArch64::STLRB, AArch64::STLRH,
2871 AArch64::STLRW, AArch64::STLRX};
2872 Register ValReg = LdSt.getReg(0);
2873 if (MRI.getType(ValReg).getSizeInBits() == 64 && MemSizeInBits != 64) {
2874 // Emit a subreg copy of 32 bits.
2875 Register NewVal = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
2876 MIB.buildInstr(TargetOpcode::COPY, {NewVal}, {})
2877 .addReg(I.getOperand(0).getReg(), 0, AArch64::sub_32);
2878 I.getOperand(0).setReg(NewVal);
2879 }
2880 I.setDesc(TII.get(Opcodes[Log2_32(MemSizeInBytes)]));
2881 }
2882 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2883 return true;
2884 }
2885
2886#ifndef NDEBUG
2887 const Register PtrReg = LdSt.getPointerReg();
2888 const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
2889 // Check that the pointer register is valid.
2890 assert(PtrRB.getID() == AArch64::GPRRegBankID &&
2891 "Load/Store pointer operand isn't a GPR");
2892 assert(MRI.getType(PtrReg).isPointer() &&
2893 "Load/Store pointer operand isn't a pointer");
2894#endif
2895
2896 const Register ValReg = LdSt.getReg(0);
2897 const LLT ValTy = MRI.getType(ValReg);
2898 const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);
2899
2900 // The code below doesn't support truncating stores, so we need to split it
2901 // again.
2902 if (isa<GStore>(LdSt) && ValTy.getSizeInBits() > MemSizeInBits) {
2903 unsigned SubReg;
2904 LLT MemTy = LdSt.getMMO().getMemoryType();
2905 auto *RC = getRegClassForTypeOnBank(MemTy, RB);
2906 if (!getSubRegForClass(RC, TRI, SubReg))
2907 return false;
2908
2909 // Generate a subreg copy.
2910 auto Copy = MIB.buildInstr(TargetOpcode::COPY, {MemTy}, {})
2911 .addReg(ValReg, 0, SubReg)
2912 .getReg(0);
2913 RBI.constrainGenericRegister(Copy, *RC, MRI);
2914 LdSt.getOperand(0).setReg(Copy);
2915 } else if (isa<GLoad>(LdSt) && ValTy.getSizeInBits() > MemSizeInBits) {
2916 // If this is an any-extending load from the FPR bank, split it into a regular
2917 // load + extend.
2918 if (RB.getID() == AArch64::FPRRegBankID) {
2919 unsigned SubReg;
2920 LLT MemTy = LdSt.getMMO().getMemoryType();
2921 auto *RC = getRegClassForTypeOnBank(MemTy, RB);
2922 if (!getSubRegForClass(RC, TRI, SubReg))
2923 return false;
2924 Register OldDst = LdSt.getReg(0);
2925 Register NewDst =
2926 MRI.createGenericVirtualRegister(LdSt.getMMO().getMemoryType());
2927 LdSt.getOperand(0).setReg(NewDst);
2928 MRI.setRegBank(NewDst, RB);
2929 // Generate a SUBREG_TO_REG to extend it.
2930 MIB.setInsertPt(MIB.getMBB(), std::next(LdSt.getIterator()));
2931 MIB.buildInstr(AArch64::SUBREG_TO_REG, {OldDst}, {})
2932 .addImm(0)
2933 .addUse(NewDst)
2934 .addImm(SubReg);
2935 auto SubRegRC = getRegClassForTypeOnBank(MRI.getType(OldDst), RB);
2936 RBI.constrainGenericRegister(OldDst, *SubRegRC, MRI);
2937 MIB.setInstr(LdSt);
2938 }
2939 }
2940
2941 // Helper lambda for partially selecting I. Either returns the original
2942 // instruction with an updated opcode, or a new instruction.
2943 auto SelectLoadStoreAddressingMode = [&]() -> MachineInstr * {
2944 bool IsStore = isa<GStore>(I);
2945 const unsigned NewOpc =
2946 selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits);
2947 if (NewOpc == I.getOpcode())
2948 return nullptr;
2949 // Check if we can fold anything into the addressing mode.
2950 auto AddrModeFns =
2951 selectAddrModeIndexed(I.getOperand(1), MemSizeInBytes);
2952 if (!AddrModeFns) {
2953 // Can't fold anything. Use the original instruction.
2954 I.setDesc(TII.get(NewOpc));
2955 I.addOperand(MachineOperand::CreateImm(0));
2956 return &I;
2957 }
2958
2959 // Folded something. Create a new instruction and return it.
2960 auto NewInst = MIB.buildInstr(NewOpc, {}, {}, I.getFlags());
2961 Register CurValReg = I.getOperand(0).getReg();
2962 IsStore ? NewInst.addUse(CurValReg) : NewInst.addDef(CurValReg);
2963 NewInst.cloneMemRefs(I);
2964 for (auto &Fn : *AddrModeFns)
2965 Fn(NewInst);
2966 I.eraseFromParent();
2967 return &*NewInst;
2968 };
2969
2970 MachineInstr *LoadStore = SelectLoadStoreAddressingMode();
2971 if (!LoadStore)
2972 return false;
2973
2974 // If we're storing a 0, use WZR/XZR.
2975 if (Opcode == TargetOpcode::G_STORE) {
2976 auto CVal = getIConstantVRegValWithLookThrough(
2977 LoadStore->getOperand(0).getReg(), MRI);
2978 if (CVal && CVal->Value == 0) {
2979 switch (LoadStore->getOpcode()) {
2980 case AArch64::STRWui:
2981 case AArch64::STRHHui:
2982 case AArch64::STRBBui:
2983 LoadStore->getOperand(0).setReg(AArch64::WZR);
2984 break;
2985 case AArch64::STRXui:
2986 LoadStore->getOperand(0).setReg(AArch64::XZR);
2987 break;
2988 }
2989 }
2990 }
2991
2992 if (IsZExtLoad) {
2993 // The zextload from a smaller type to i32 should be handled by the
2994 // importer.
2995 if (MRI.getType(LoadStore->getOperand(0).getReg()).getSizeInBits() != 64)
2996 return false;
2997 // If we have a ZEXTLOAD then change the load's type to be a narrower reg
2998 // and zero_extend with SUBREG_TO_REG.
2999 Register LdReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
3000 Register DstReg = LoadStore->getOperand(0).getReg();
3001 LoadStore->getOperand(0).setReg(LdReg);
3002
3003 MIB.setInsertPt(MIB.getMBB(), std::next(LoadStore->getIterator()));
3004 MIB.buildInstr(AArch64::SUBREG_TO_REG, {DstReg}, {})
3005 .addImm(0)
3006 .addUse(LdReg)
3007 .addImm(AArch64::sub_32);
3008 constrainSelectedInstRegOperands(*LoadStore, TII, TRI, RBI);
3009 return RBI.constrainGenericRegister(DstReg, AArch64::GPR64allRegClass,
3010 MRI);
3011 }
3012 return constrainSelectedInstRegOperands(*LoadStore, TII, TRI, RBI);
3013 }
3014
3015 case TargetOpcode::G_INDEXED_ZEXTLOAD:
3016 case TargetOpcode::G_INDEXED_SEXTLOAD:
3017 return selectIndexedExtLoad(I, MRI);
3018 case TargetOpcode::G_INDEXED_LOAD:
3019 return selectIndexedLoad(I, MRI);
3020 case TargetOpcode::G_INDEXED_STORE:
3021 return selectIndexedStore(cast<GIndexedStore>(I), MRI);
3022
3023 case TargetOpcode::G_LSHR:
3024 case TargetOpcode::G_ASHR:
3025 if (MRI.getType(I.getOperand(0).getReg()).isVector())
3026 return selectVectorAshrLshr(I, MRI);
3027 [[fallthrough]];
3028 case TargetOpcode::G_SHL:
3029 if (Opcode == TargetOpcode::G_SHL &&
3030 MRI.getType(I.getOperand(0).getReg()).isVector())
3031 return selectVectorSHL(I, MRI);
3032
3033 // These shifts were legalized to have 64 bit shift amounts because we
3034 // want to take advantage of the selection patterns that assume the
3035 // immediates are s64s, however, selectBinaryOp will assume both operands
3036 // will have the same bit size.
3037 {
3038 Register SrcReg = I.getOperand(1).getReg();
3039 Register ShiftReg = I.getOperand(2).getReg();
3040 const LLT ShiftTy = MRI.getType(ShiftReg);
3041 const LLT SrcTy = MRI.getType(SrcReg);
3042 if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
3043 ShiftTy.getSizeInBits() == 64) {
3044 assert(!ShiftTy.isVector() && "unexpected vector shift ty");
3045 // Insert a subregister copy to implement a 64->32 trunc
3046 auto Trunc = MIB.buildInstr(TargetOpcode::COPY, {SrcTy}, {})
3047 .addReg(ShiftReg, 0, AArch64::sub_32);
3048 MRI.setRegBank(Trunc.getReg(0), RBI.getRegBank(AArch64::GPRRegBankID));
3049 I.getOperand(2).setReg(Trunc.getReg(0));
3050 }
3051 }
3052 [[fallthrough]];
3053 case TargetOpcode::G_OR: {
3054 // Reject the various things we don't support yet.
3055 if (unsupportedBinOp(I, RBI, MRI, TRI))
3056 return false;
3057
3058 const unsigned OpSize = Ty.getSizeInBits();
3059
3060 const Register DefReg = I.getOperand(0).getReg();
3061 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
3062
3063 const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
3064 if (NewOpc == I.getOpcode())
3065 return false;
3066
3067 I.setDesc(TII.get(NewOpc));
3068 // FIXME: Should the type be always reset in setDesc?
3069
3070 // Now that we selected an opcode, we need to constrain the register
3071 // operands to use appropriate classes.
3072 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
3073 }
3074
3075 case TargetOpcode::G_PTR_ADD: {
3076 emitADD(I.getOperand(0).getReg(), I.getOperand(1), I.getOperand(2), MIB);
3077 I.eraseFromParent();
3078 return true;
3079 }
3080
3081 case TargetOpcode::G_SADDE:
3082 case TargetOpcode::G_UADDE:
3083 case TargetOpcode::G_SSUBE:
3084 case TargetOpcode::G_USUBE:
3085 case TargetOpcode::G_SADDO:
3086 case TargetOpcode::G_UADDO:
3087 case TargetOpcode::G_SSUBO:
3088 case TargetOpcode::G_USUBO:
3089 return selectOverflowOp(I, MRI);
3090
3091 case TargetOpcode::G_PTRMASK: {
3092 Register MaskReg = I.getOperand(2).getReg();
3093 std::optional<int64_t> MaskVal = getIConstantVRegSExtVal(MaskReg, MRI);
3094 // TODO: Implement arbitrary cases
3095 if (!MaskVal || !isShiftedMask_64(*MaskVal))
3096 return false;
3097
3098 uint64_t Mask = *MaskVal;
3099 I.setDesc(TII.get(AArch64::ANDXri));
3100 I.getOperand(2).ChangeToImmediate(
3101 AArch64_AM::encodeLogicalImmediate(Mask, 64));
3102
3103 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
3104 }
3105 case TargetOpcode::G_PTRTOINT:
3106 case TargetOpcode::G_TRUNC: {
3107 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
3108 const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
3109
3110 const Register DstReg = I.getOperand(0).getReg();
3111 const Register SrcReg = I.getOperand(1).getReg();
3112
3113 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
3114 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
3115
3116 if (DstRB.getID() != SrcRB.getID()) {
3117 LLVM_DEBUG(
3118 dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
3119 return false;
3120 }
3121
3122 if (DstRB.getID() == AArch64::GPRRegBankID) {
3123 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(DstTy, DstRB);
3124 if (!DstRC)
3125 return false;
3126
3127 const TargetRegisterClass *SrcRC = getRegClassForTypeOnBank(SrcTy, SrcRB);
3128 if (!SrcRC)
3129 return false;
3130
3131 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
3132 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
3133 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
3134 return false;
3135 }
3136
3137 if (DstRC == SrcRC) {
3138 // Nothing to be done
3139 } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
3140 SrcTy == LLT::scalar(64)) {
3141 llvm_unreachable("TableGen can import this case");
3142 return false;
3143 } else if (DstRC == &AArch64::GPR32RegClass &&
3144 SrcRC == &AArch64::GPR64RegClass) {
3145 I.getOperand(1).setSubReg(AArch64::sub_32);
3146 } else {
3147 LLVM_DEBUG(
3148 dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
3149 return false;
3150 }
3151
3152 I.setDesc(TII.get(TargetOpcode::COPY));
3153 return true;
3154 } else if (DstRB.getID() == AArch64::FPRRegBankID) {
3155 if (DstTy == LLT::fixed_vector(4, 16) &&
3156 SrcTy == LLT::fixed_vector(4, 32)) {
3157 I.setDesc(TII.get(AArch64::XTNv4i16));
3158 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
3159 return true;
3160 }
3161
3162 if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128) {
3163 MachineInstr *Extract = emitExtractVectorElt(
3164 DstReg, DstRB, LLT::scalar(DstTy.getSizeInBits()), SrcReg, 0, MIB);
3165 if (!Extract)
3166 return false;
3167 I.eraseFromParent();
3168 return true;
3169 }
3170
3171 // We might have a vector G_PTRTOINT, in which case just emit a COPY.
3172 if (Opcode == TargetOpcode::G_PTRTOINT) {
3173 assert(DstTy.isVector() && "Expected an FPR ptrtoint to be a vector");
3174 I.setDesc(TII.get(TargetOpcode::COPY));
3175 return selectCopy(I, TII, MRI, TRI, RBI);
3176 }
3177 }
3178
3179 return false;
3180 }
3181
3182 case TargetOpcode::G_ANYEXT: {
3183 if (selectUSMovFromExtend(I, MRI))
3184 return true;
3185
3186 const Register DstReg = I.getOperand(0).getReg();
3187 const Register SrcReg = I.getOperand(1).getReg();
3188
3189 const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
3190 if (RBDst.getID() != AArch64::GPRRegBankID) {
3191 LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
3192 << ", expected: GPR\n");
3193 return false;
3194 }
3195
3196 const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
3197 if (RBSrc.getID() != AArch64::GPRRegBankID) {
3198 LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
3199 << ", expected: GPR\n");
3200 return false;
3201 }
3202
3203 const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
3204
3205 if (DstSize == 0) {
3206 LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
3207 return false;
3208 }
3209
3210 if (DstSize != 64 && DstSize > 32) {
3211 LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
3212 << ", expected: 32 or 64\n");
3213 return false;
3214 }
3215 // At this point G_ANYEXT is just like a plain COPY, but we need
3216 // to explicitly form the 64-bit value if any.
3217 if (DstSize > 32) {
3218 Register ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
3219 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
3220 .addDef(ExtSrc)
3221 .addImm(0)
3222 .addUse(SrcReg)
3223 .addImm(AArch64::sub_32);
3224 I.getOperand(1).setReg(ExtSrc);
3225 }
3226 return selectCopy(I, TII, MRI, TRI, RBI);
3227 }
3228
3229 case TargetOpcode::G_ZEXT:
3230 case TargetOpcode::G_SEXT_INREG:
3231 case TargetOpcode::G_SEXT: {
3232 if (selectUSMovFromExtend(I, MRI))
3233 return true;
3234
3235 unsigned Opcode = I.getOpcode();
3236 const bool IsSigned = Opcode != TargetOpcode::G_ZEXT;
3237 const Register DefReg = I.getOperand(0).getReg();
3238 Register SrcReg = I.getOperand(1).getReg();
3239 const LLT DstTy = MRI.getType(DefReg);
3240 const LLT SrcTy = MRI.getType(SrcReg);
3241 unsigned DstSize = DstTy.getSizeInBits();
3242 unsigned SrcSize = SrcTy.getSizeInBits();
3243
3244 // SEXT_INREG has the same src reg size as dst, the size of the value to be
3245 // extended is encoded in the imm.
3246 if (Opcode == TargetOpcode::G_SEXT_INREG)
3247 SrcSize = I.getOperand(2).getImm();
3248
3249 if (DstTy.isVector())
3250 return false; // Should be handled by imported patterns.
3251
3252 assert((*RBI.getRegBank(DefReg, MRI, TRI)).getID() ==
3253 AArch64::GPRRegBankID &&
3254 "Unexpected ext regbank");
3255
3256 MachineInstr *ExtI;
3257
3258 // First, check if we're extending the result of a load whose dest type is
3259 // smaller than 32 bits; in that case this zext is redundant. GPR32 is the smallest
3260 // GPR register on AArch64 and all loads which are smaller automatically
3261 // zero-extend the upper bits. E.g.
3262 // %v(s8) = G_LOAD %p, :: (load 1)
3263 // %v2(s32) = G_ZEXT %v(s8)
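 // Here the G_ZEXT is selected as a plain COPY, since the sub-32-bit load
 // has already zeroed the upper bits of the 32-bit register.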
3264 if (!IsSigned) {
3265 auto *LoadMI = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI);
3266 bool IsGPR =
3267 RBI.getRegBank(SrcReg, MRI, TRI)->getID() == AArch64::GPRRegBankID;
3268 if (LoadMI && IsGPR) {
3269 const MachineMemOperand *MemOp = *LoadMI->memoperands_begin();
3270 unsigned BytesLoaded = MemOp->getSize();
3271 if (BytesLoaded < 4 && SrcTy.getSizeInBytes() == BytesLoaded)
3272 return selectCopy(I, TII, MRI, TRI, RBI);
3273 }
3274
3275 // For the 32-bit -> 64-bit case, we can emit a mov (ORRWrs)
3276 // + SUBREG_TO_REG.
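 // Roughly: %tmp:gpr32 = ORRWrs $wzr, %src, 0
 //          %dst:gpr64 = SUBREG_TO_REG 0, %tmp, %subreg.sub_32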
3277 if (IsGPR && SrcSize == 32 && DstSize == 64) {
3278 Register SubregToRegSrc =
3279 MRI.createVirtualRegister(&AArch64::GPR32RegClass);
3280 const Register ZReg = AArch64::WZR;
3281 MIB.buildInstr(AArch64::ORRWrs, {SubregToRegSrc}, {ZReg, SrcReg})
3282 .addImm(0);
3283
3284 MIB.buildInstr(AArch64::SUBREG_TO_REG, {DefReg}, {})
3285 .addImm(0)
3286 .addUse(SubregToRegSrc)
3287 .addImm(AArch64::sub_32);
3288
3289 if (!RBI.constrainGenericRegister(DefReg, AArch64::GPR64RegClass,
3290 MRI)) {
3291 LLVM_DEBUG(dbgs() << "Failed to constrain G_ZEXT destination\n");
3292 return false;
3293 }
3294
3295 if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass,
3296 MRI)) {
3297 LLVM_DEBUG(dbgs() << "Failed to constrain G_ZEXT source\n");
3298 return false;
3299 }
3300
3301 I.eraseFromParent();
3302 return true;
3303 }
3304 }
3305
3306 if (DstSize == 64) {
3307 if (Opcode != TargetOpcode::G_SEXT_INREG) {
3308 // FIXME: Can we avoid manually doing this?
3309 if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass,
3310 MRI)) {
3311 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
3312 << " operand\n");
3313 return false;
3314 }
3315 SrcReg = MIB.buildInstr(AArch64::SUBREG_TO_REG,
3316 {&AArch64::GPR64RegClass}, {})
3317 .addImm(0)
3318 .addUse(SrcReg)
3319 .addImm(AArch64::sub_32)
3320 .getReg(0);
3321 }
3322
3323 ExtI = MIB.buildInstr(IsSigned ? AArch64::SBFMXri : AArch64::UBFMXri,
3324 {DefReg}, {SrcReg})
3325 .addImm(0)
3326 .addImm(SrcSize - 1);
3327 } else if (DstSize <= 32) {
3328 ExtI = MIB.buildInstr(IsSigned ? AArch64::SBFMWri : AArch64::UBFMWri,
3329 {DefReg}, {SrcReg})
3330 .addImm(0)
3331 .addImm(SrcSize - 1);
3332 } else {
3333 return false;
3334 }
3335
 3336 constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
 3337 I.eraseFromParent();
3338 return true;
3339 }
3340
3341 case TargetOpcode::G_SITOFP:
3342 case TargetOpcode::G_UITOFP:
3343 case TargetOpcode::G_FPTOSI:
3344 case TargetOpcode::G_FPTOUI: {
3345 const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
3346 SrcTy = MRI.getType(I.getOperand(1).getReg());
3347 const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
3348 if (NewOpc == Opcode)
3349 return false;
3350
3351 I.setDesc(TII.get(NewOpc));
 3352 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
 3353 I.setFlags(MachineInstr::NoFPExcept);
3354
3355 return true;
3356 }
3357
3358 case TargetOpcode::G_FREEZE:
3359 return selectCopy(I, TII, MRI, TRI, RBI);
3360
3361 case TargetOpcode::G_INTTOPTR:
3362 // The importer is currently unable to import pointer types since they
3363 // didn't exist in SelectionDAG.
3364 return selectCopy(I, TII, MRI, TRI, RBI);
3365
3366 case TargetOpcode::G_BITCAST:
3367 // Imported SelectionDAG rules can handle every bitcast except those that
3368 // bitcast from a type to the same type. Ideally, these shouldn't occur
3369 // but we might not run an optimizer that deletes them. The other exception
3370 // is bitcasts involving pointer types, as SelectionDAG has no knowledge
3371 // of them.
3372 return selectCopy(I, TII, MRI, TRI, RBI);
3373
3374 case TargetOpcode::G_SELECT: {
3375 auto &Sel = cast<GSelect>(I);
3376 const Register CondReg = Sel.getCondReg();
3377 const Register TReg = Sel.getTrueReg();
3378 const Register FReg = Sel.getFalseReg();
3379
3380 if (tryOptSelect(Sel))
3381 return true;
3382
3383 // Make sure to use an unused vreg instead of wzr, so that the peephole
3384 // optimizations will be able to optimize these.
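 // i.e. test bit 0 of the condition with an ANDS against the logical
 // immediate 1, then select on the NE (bit set) condition.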
3385 Register DeadVReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
3386 auto TstMI = MIB.buildInstr(AArch64::ANDSWri, {DeadVReg}, {CondReg})
3387 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
 3388 constrainSelectedInstRegOperands(*TstMI, TII, TRI, RBI);
 3389 if (!emitSelect(Sel.getReg(0), TReg, FReg, AArch64CC::NE, MIB))
3390 return false;
3391 Sel.eraseFromParent();
3392 return true;
3393 }
3394 case TargetOpcode::G_ICMP: {
3395 if (Ty.isVector())
3396 return selectVectorICmp(I, MRI);
3397
3398 if (Ty != LLT::scalar(32)) {
3399 LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
3400 << ", expected: " << LLT::scalar(32) << '\n');
3401 return false;
3402 }
3403
3404 auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
3405 const AArch64CC::CondCode InvCC =
 3406 changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
 3407 emitIntegerCompare(I.getOperand(2), I.getOperand(3), I.getOperand(1), MIB);
3408 emitCSINC(/*Dst=*/I.getOperand(0).getReg(), /*Src1=*/AArch64::WZR,
3409 /*Src2=*/AArch64::WZR, InvCC, MIB);
3410 I.eraseFromParent();
3411 return true;
3412 }
3413
3414 case TargetOpcode::G_FCMP: {
3415 CmpInst::Predicate Pred =
3416 static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
3417 if (!emitFPCompare(I.getOperand(2).getReg(), I.getOperand(3).getReg(), MIB,
3418 Pred) ||
3419 !emitCSetForFCmp(I.getOperand(0).getReg(), Pred, MIB))
3420 return false;
3421 I.eraseFromParent();
3422 return true;
3423 }
3424 case TargetOpcode::G_VASTART:
3425 return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
3426 : selectVaStartAAPCS(I, MF, MRI);
3427 case TargetOpcode::G_INTRINSIC:
3428 return selectIntrinsic(I, MRI);
3429 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3430 return selectIntrinsicWithSideEffects(I, MRI);
3431 case TargetOpcode::G_IMPLICIT_DEF: {
3432 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
3433 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
3434 const Register DstReg = I.getOperand(0).getReg();
3435 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
3436 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(DstTy, DstRB);
3437 RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
3438 return true;
3439 }
3440 case TargetOpcode::G_BLOCK_ADDR: {
3441 if (TM.getCodeModel() == CodeModel::Large && !TM.isPositionIndependent()) {
3442 materializeLargeCMVal(I, I.getOperand(1).getBlockAddress(), 0);
3443 I.eraseFromParent();
3444 return true;
3445 } else {
3446 I.setDesc(TII.get(AArch64::MOVaddrBA));
3447 auto MovMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::MOVaddrBA),
3448 I.getOperand(0).getReg())
3449 .addBlockAddress(I.getOperand(1).getBlockAddress(),
3450 /* Offset */ 0, AArch64II::MO_PAGE)
 3451 .addBlockAddress(
 3452 I.getOperand(1).getBlockAddress(), /* Offset */ 0,
 3453 AArch64II::MO_NC | AArch64II::MO_PAGEOFF);
 3454 I.eraseFromParent();
3455 return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
3456 }
3457 }
3458 case AArch64::G_DUP: {
 3459 // When the scalar of G_DUP is an s8/s16 GPR, it can't be selected by the
 3460 // imported patterns, so do it manually here. Avoiding generating an s16 GPR
 3461 // is difficult because at RegBankSelect we could end up pessimizing the FPR
 3462 // case if we added an anyextend to fix this. Manual selection is the most
 3463 // robust solution for now.
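 // E.g. %dup:fpr(<8 x s8>) = G_DUP %val:gpr(s8) is selected to DUPv8i8gpr
 // here.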
3464 if (RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI)->getID() !=
3465 AArch64::GPRRegBankID)
3466 return false; // We expect the fpr regbank case to be imported.
3467 LLT VecTy = MRI.getType(I.getOperand(0).getReg());
3468 if (VecTy == LLT::fixed_vector(8, 8))
3469 I.setDesc(TII.get(AArch64::DUPv8i8gpr));
3470 else if (VecTy == LLT::fixed_vector(16, 8))
3471 I.setDesc(TII.get(AArch64::DUPv16i8gpr));
3472 else if (VecTy == LLT::fixed_vector(4, 16))
3473 I.setDesc(TII.get(AArch64::DUPv4i16gpr));
3474 else if (VecTy == LLT::fixed_vector(8, 16))
3475 I.setDesc(TII.get(AArch64::DUPv8i16gpr));
3476 else
3477 return false;
 3478 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
 3479 }
3480 case TargetOpcode::G_BUILD_VECTOR:
3481 return selectBuildVector(I, MRI);
3482 case TargetOpcode::G_MERGE_VALUES:
3483 return selectMergeValues(I, MRI);
3484 case TargetOpcode::G_UNMERGE_VALUES:
3485 return selectUnmergeValues(I, MRI);
3486 case TargetOpcode::G_SHUFFLE_VECTOR:
3487 return selectShuffleVector(I, MRI);
3488 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3489 return selectExtractElt(I, MRI);
3490 case TargetOpcode::G_INSERT_VECTOR_ELT:
3491 return selectInsertElt(I, MRI);
3492 case TargetOpcode::G_CONCAT_VECTORS:
3493 return selectConcatVectors(I, MRI);
3494 case TargetOpcode::G_JUMP_TABLE:
3495 return selectJumpTable(I, MRI);
3496 case TargetOpcode::G_MEMCPY:
3497 case TargetOpcode::G_MEMCPY_INLINE:
3498 case TargetOpcode::G_MEMMOVE:
3499 case TargetOpcode::G_MEMSET:
3500 assert(STI.hasMOPS() && "Shouldn't get here without +mops feature");
3501 return selectMOPS(I, MRI);
3502 }
3503
3504 return false;
3505}
3506
3507bool AArch64InstructionSelector::selectAndRestoreState(MachineInstr &I) {
3508 MachineIRBuilderState OldMIBState = MIB.getState();
3509 bool Success = select(I);
3510 MIB.setState(OldMIBState);
3511 return Success;
3512}
3513
 3514bool AArch64InstructionSelector::selectMOPS(MachineInstr &GI,
 3515 MachineRegisterInfo &MRI) {
3516 unsigned Mopcode;
3517 switch (GI.getOpcode()) {
3518 case TargetOpcode::G_MEMCPY:
3519 case TargetOpcode::G_MEMCPY_INLINE:
3520 Mopcode = AArch64::MOPSMemoryCopyPseudo;
3521 break;
3522 case TargetOpcode::G_MEMMOVE:
3523 Mopcode = AArch64::MOPSMemoryMovePseudo;
3524 break;
3525 case TargetOpcode::G_MEMSET:
3526 // For tagged memset see llvm.aarch64.mops.memset.tag
3527 Mopcode = AArch64::MOPSMemorySetPseudo;
3528 break;
3529 }
3530
3531 auto &DstPtr = GI.getOperand(0);
3532 auto &SrcOrVal = GI.getOperand(1);
3533 auto &Size = GI.getOperand(2);
3534
3535 // Create copies of the registers that can be clobbered.
3536 const Register DstPtrCopy = MRI.cloneVirtualRegister(DstPtr.getReg());
3537 const Register SrcValCopy = MRI.cloneVirtualRegister(SrcOrVal.getReg());
3538 const Register SizeCopy = MRI.cloneVirtualRegister(Size.getReg());
3539
3540 const bool IsSet = Mopcode == AArch64::MOPSMemorySetPseudo;
3541 const auto &SrcValRegClass =
3542 IsSet ? AArch64::GPR64RegClass : AArch64::GPR64commonRegClass;
3543
3544 // Constrain to specific registers
3545 RBI.constrainGenericRegister(DstPtrCopy, AArch64::GPR64commonRegClass, MRI);
3546 RBI.constrainGenericRegister(SrcValCopy, SrcValRegClass, MRI);
3547 RBI.constrainGenericRegister(SizeCopy, AArch64::GPR64RegClass, MRI);
3548
3549 MIB.buildCopy(DstPtrCopy, DstPtr);
3550 MIB.buildCopy(SrcValCopy, SrcOrVal);
3551 MIB.buildCopy(SizeCopy, Size);
3552
 3553 // The new instruction uses the copied registers because it must update them.
 3554 // The defs are not used since they don't exist in G_MEM*, but they are still
 3555 // tied to the uses.
 3556 // Note: the order of operands differs from G_MEMSET, G_MEMCPY and G_MEMMOVE.
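 // E.g. MOPSMemorySetPseudo takes (DstPtr, Size, Value) while G_MEMSET's
 // operands are (DstPtr, Value, Size), hence the swap in the IsSet case.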
3557 Register DefDstPtr = MRI.createVirtualRegister(&AArch64::GPR64commonRegClass);
3558 Register DefSize = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
3559 if (IsSet) {
3560 MIB.buildInstr(Mopcode, {DefDstPtr, DefSize},
3561 {DstPtrCopy, SizeCopy, SrcValCopy});
3562 } else {
3563 Register DefSrcPtr = MRI.createVirtualRegister(&SrcValRegClass);
3564 MIB.buildInstr(Mopcode, {DefDstPtr, DefSrcPtr, DefSize},
3565 {DstPtrCopy, SrcValCopy, SizeCopy});
3566 }
3567
3568 GI.eraseFromParent();
3569 return true;
3570}
3571
 3572bool AArch64InstructionSelector::selectBrJT(MachineInstr &I,
 3573 MachineRegisterInfo &MRI) {
3574 assert(I.getOpcode() == TargetOpcode::G_BRJT && "Expected G_BRJT");
3575 Register JTAddr = I.getOperand(0).getReg();
3576 unsigned JTI = I.getOperand(1).getIndex();
3577 Register Index = I.getOperand(2).getReg();
3578
3579 Register TargetReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
3580 Register ScratchReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
3581
3582 MF->getInfo<AArch64FunctionInfo>()->setJumpTableEntryInfo(JTI, 4, nullptr);
3583 auto JumpTableInst = MIB.buildInstr(AArch64::JumpTableDest32,
3584 {TargetReg, ScratchReg}, {JTAddr, Index})
3585 .addJumpTableIndex(JTI);
3586 // Save the jump table info.
3587 MIB.buildInstr(TargetOpcode::JUMP_TABLE_DEBUG_INFO, {},
3588 {static_cast<int64_t>(JTI)});
3589 // Build the indirect branch.
3590 MIB.buildInstr(AArch64::BR, {}, {TargetReg});
3591 I.eraseFromParent();
3592 return constrainSelectedInstRegOperands(*JumpTableInst, TII, TRI, RBI);
3593}
3594
 3595bool AArch64InstructionSelector::selectJumpTable(MachineInstr &I,
 3596 MachineRegisterInfo &MRI) {
3597 assert(I.getOpcode() == TargetOpcode::G_JUMP_TABLE && "Expected jump table");
3598 assert(I.getOperand(1).isJTI() && "Jump table op should have a JTI!");
3599
3600 Register DstReg = I.getOperand(0).getReg();
3601 unsigned JTI = I.getOperand(1).getIndex();
3602 // We generate a MOVaddrJT which will get expanded to an ADRP + ADD later.
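 // (Roughly: an ADRP of the jump table's page followed by an ADD of the low
 // 12 bits of its address.)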
3603 auto MovMI =
3604 MIB.buildInstr(AArch64::MOVaddrJT, {DstReg}, {})
3605 .addJumpTableIndex(JTI, AArch64II::MO_PAGE)
 3606 .addJumpTableIndex(JTI, AArch64II::MO_NC | AArch64II::MO_PAGEOFF);
 3607 I.eraseFromParent();
3608 return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
3609}
3610
 3611bool AArch64InstructionSelector::selectTLSGlobalValue(
 3612 MachineInstr &I, MachineRegisterInfo &MRI) {
3613 if (!STI.isTargetMachO())
3614 return false;
3615 MachineFunction &MF = *I.getParent()->getParent();
3616 MF.getFrameInfo().setAdjustsStack(true);
3617
3618 const auto &GlobalOp = I.getOperand(1);
3619 assert(GlobalOp.getOffset() == 0 &&
3620 "Shouldn't have an offset on TLS globals!");
3621 const GlobalValue &GV = *GlobalOp.getGlobal();
3622
3623 auto LoadGOT =
3624 MIB.buildInstr(AArch64::LOADgot, {&AArch64::GPR64commonRegClass}, {})
3625 .addGlobalAddress(&GV, 0, AArch64II::MO_TLS);
3626
3627 auto Load = MIB.buildInstr(AArch64::LDRXui, {&AArch64::GPR64commonRegClass},
3628 {LoadGOT.getReg(0)})
3629 .addImm(0);
3630
3631 MIB.buildCopy(Register(AArch64::X0), LoadGOT.getReg(0));
3632 // TLS calls preserve all registers except those that absolutely must be
3633 // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
3634 // silly).
3635 MIB.buildInstr(getBLRCallOpcode(MF), {}, {Load})
3636 .addUse(AArch64::X0, RegState::Implicit)
3637 .addDef(AArch64::X0, RegState::Implicit)
3638 .addRegMask(TRI.getTLSCallPreservedMask());
3639
3640 MIB.buildCopy(I.getOperand(0).getReg(), Register(AArch64::X0));
3641 RBI.constrainGenericRegister(I.getOperand(0).getReg(), AArch64::GPR64RegClass,
3642 MRI);
3643 I.eraseFromParent();
3644 return true;
3645}
3646
 3647bool AArch64InstructionSelector::selectVectorICmp(
 3648 MachineInstr &I, MachineRegisterInfo &MRI) {
3649 Register DstReg = I.getOperand(0).getReg();
3650 LLT DstTy = MRI.getType(DstReg);
3651 Register SrcReg = I.getOperand(2).getReg();
3652 Register Src2Reg = I.getOperand(3).getReg();
3653 LLT SrcTy = MRI.getType(SrcReg);
3654
3655 unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits();
3656 unsigned NumElts = DstTy.getNumElements();
3657
3658 // First index is element size, 0 == 8b, 1 == 16b, 2 == 32b, 3 == 64b
3659 // Second index is num elts, 0 == v2, 1 == v4, 2 == v8, 3 == v16
3660 // Third index is cc opcode:
3661 // 0 == eq
3662 // 1 == ugt
3663 // 2 == uge
3664 // 3 == ult
3665 // 4 == ule
3666 // 5 == sgt
3667 // 6 == sge
3668 // 7 == slt
3669 // 8 == sle
3670 // ne is done by negating 'eq' result.
3671
3672 // This table below assumes that for some comparisons the operands will be
3673 // commuted.
3674 // ult op == commute + ugt op
3675 // ule op == commute + uge op
3676 // slt op == commute + sgt op
3677 // sle op == commute + sge op
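 // For example, an ult compare of two <4 x s32> operands uses
 // OpcTable[2][1][3] == CMHIv4i32 with the operands swapped.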
3678 unsigned PredIdx = 0;
3679 bool SwapOperands = false;
3680 CmpInst::Predicate Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
3681 switch (Pred) {
3682 case CmpInst::ICMP_NE:
3683 case CmpInst::ICMP_EQ:
3684 PredIdx = 0;
3685 break;
3686 case CmpInst::ICMP_UGT:
3687 PredIdx = 1;
3688 break;
3689 case CmpInst::ICMP_UGE:
3690 PredIdx = 2;
3691 break;
3692 case CmpInst::ICMP_ULT:
3693 PredIdx = 3;
3694 SwapOperands = true;
3695 break;
3696 case CmpInst::ICMP_ULE:
3697 PredIdx = 4;
3698 SwapOperands = true;
3699 break;
3700 case CmpInst::ICMP_SGT:
3701 PredIdx = 5;
3702 break;
3703 case CmpInst::ICMP_SGE:
3704 PredIdx = 6;
3705 break;
3706 case CmpInst::ICMP_SLT:
3707 PredIdx = 7;
3708 SwapOperands = true;
3709 break;
3710 case CmpInst::ICMP_SLE:
3711 PredIdx = 8;
3712 SwapOperands = true;
3713 break;
3714 default:
3715 llvm_unreachable("Unhandled icmp predicate");
3716 return false;
3717 }
3718
3719 // This table obviously should be tablegen'd when we have our GISel native
3720 // tablegen selector.
3721
3722 static const unsigned OpcTable[4][4][9] = {
3723 {
3724 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3725 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3726 0 /* invalid */},
3727 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3728 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3729 0 /* invalid */},
3730 {AArch64::CMEQv8i8, AArch64::CMHIv8i8, AArch64::CMHSv8i8,
3731 AArch64::CMHIv8i8, AArch64::CMHSv8i8, AArch64::CMGTv8i8,
3732 AArch64::CMGEv8i8, AArch64::CMGTv8i8, AArch64::CMGEv8i8},
3733 {AArch64::CMEQv16i8, AArch64::CMHIv16i8, AArch64::CMHSv16i8,
3734 AArch64::CMHIv16i8, AArch64::CMHSv16i8, AArch64::CMGTv16i8,
3735 AArch64::CMGEv16i8, AArch64::CMGTv16i8, AArch64::CMGEv16i8}
3736 },
3737 {
3738 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3739 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3740 0 /* invalid */},
3741 {AArch64::CMEQv4i16, AArch64::CMHIv4i16, AArch64::CMHSv4i16,
3742 AArch64::CMHIv4i16, AArch64::CMHSv4i16, AArch64::CMGTv4i16,
3743 AArch64::CMGEv4i16, AArch64::CMGTv4i16, AArch64::CMGEv4i16},
3744 {AArch64::CMEQv8i16, AArch64::CMHIv8i16, AArch64::CMHSv8i16,
3745 AArch64::CMHIv8i16, AArch64::CMHSv8i16, AArch64::CMGTv8i16,
3746 AArch64::CMGEv8i16, AArch64::CMGTv8i16, AArch64::CMGEv8i16},
3747 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3748 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3749 0 /* invalid */}
3750 },
3751 {
3752 {AArch64::CMEQv2i32, AArch64::CMHIv2i32, AArch64::CMHSv2i32,
3753 AArch64::CMHIv2i32, AArch64::CMHSv2i32, AArch64::CMGTv2i32,
3754 AArch64::CMGEv2i32, AArch64::CMGTv2i32, AArch64::CMGEv2i32},
3755 {AArch64::CMEQv4i32, AArch64::CMHIv4i32, AArch64::CMHSv4i32,
3756 AArch64::CMHIv4i32, AArch64::CMHSv4i32, AArch64::CMGTv4i32,
3757 AArch64::CMGEv4i32, AArch64::CMGTv4i32, AArch64::CMGEv4i32},
3758 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3759 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3760 0 /* invalid */},
3761 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3762 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3763 0 /* invalid */}
3764 },
3765 {
3766 {AArch64::CMEQv2i64, AArch64::CMHIv2i64, AArch64::CMHSv2i64,
3767 AArch64::CMHIv2i64, AArch64::CMHSv2i64, AArch64::CMGTv2i64,
3768 AArch64::CMGEv2i64, AArch64::CMGTv2i64, AArch64::CMGEv2i64},
3769 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3770 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3771 0 /* invalid */},
3772 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3773 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3774 0 /* invalid */},
3775 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3776 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3777 0 /* invalid */}
3778 },
3779 };
3780 unsigned EltIdx = Log2_32(SrcEltSize / 8);
3781 unsigned NumEltsIdx = Log2_32(NumElts / 2);
3782 unsigned Opc = OpcTable[EltIdx][NumEltsIdx][PredIdx];
3783 if (!Opc) {
3784 LLVM_DEBUG(dbgs() << "Could not map G_ICMP to cmp opcode");
3785 return false;
3786 }
3787
3788 const RegisterBank &VecRB = *RBI.getRegBank(SrcReg, MRI, TRI);
3789 const TargetRegisterClass *SrcRC =
3790 getRegClassForTypeOnBank(SrcTy, VecRB, true);
3791 if (!SrcRC) {
3792 LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
3793 return false;
3794 }
3795
3796 unsigned NotOpc = Pred == ICmpInst::ICMP_NE ? AArch64::NOTv8i8 : 0;
3797 if (SrcTy.getSizeInBits() == 128)
3798 NotOpc = NotOpc ? AArch64::NOTv16i8 : 0;
3799
3800 if (SwapOperands)
3801 std::swap(SrcReg, Src2Reg);
3802
3803 auto Cmp = MIB.buildInstr(Opc, {SrcRC}, {SrcReg, Src2Reg});
 3804 constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
 3805
3806 // Invert if we had a 'ne' cc.
3807 if (NotOpc) {
3808 Cmp = MIB.buildInstr(NotOpc, {DstReg}, {Cmp});
 3809 constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
 3810 } else {
3811 MIB.buildCopy(DstReg, Cmp.getReg(0));
3812 }
3813 RBI.constrainGenericRegister(DstReg, *SrcRC, MRI);
3814 I.eraseFromParent();
3815 return true;
3816}
3817
3818MachineInstr *AArch64InstructionSelector::emitScalarToVector(
3819 unsigned EltSize, const TargetRegisterClass *DstRC, Register Scalar,
3820 MachineIRBuilder &MIRBuilder) const {
3821 auto Undef = MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstRC}, {});
3822
3823 auto BuildFn = [&](unsigned SubregIndex) {
3824 auto Ins =
3825 MIRBuilder
3826 .buildInstr(TargetOpcode::INSERT_SUBREG, {DstRC}, {Undef, Scalar})
3827 .addImm(SubregIndex);
 3828 constrainSelectedInstRegOperands(*Undef, TII, TRI, RBI);
 3829 constrainSelectedInstRegOperands(*Ins, TII, TRI, RBI);
 3830 return &*Ins;
3831 };
3832
3833 switch (EltSize) {
3834 case 8:
3835 return BuildFn(AArch64::bsub);
3836 case 16:
3837 return BuildFn(AArch64::hsub);
3838 case 32:
3839 return BuildFn(AArch64::ssub);
3840 case 64:
3841 return BuildFn(AArch64::dsub);
3842 default:
3843 return nullptr;
3844 }
3845}
3846
 3847MachineInstr *
 3848AArch64InstructionSelector::emitNarrowVector(Register DstReg, Register SrcReg,
3849 MachineIRBuilder &MIB,
3850 MachineRegisterInfo &MRI) const {
3851 LLT DstTy = MRI.getType(DstReg);
3852 const TargetRegisterClass *RC =
3853 getRegClassForTypeOnBank(DstTy, *RBI.getRegBank(SrcReg, MRI, TRI));
3854 if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
3855 LLVM_DEBUG(dbgs() << "Unsupported register class!\n");
3856 return nullptr;
3857 }
3858 unsigned SubReg = 0;
3859 if (!getSubRegForClass(RC, TRI, SubReg))
3860 return nullptr;
3861 if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) {
3862 LLVM_DEBUG(dbgs() << "Unsupported destination size! ("
 3863 << DstTy.getSizeInBits() << ")\n");
3864 return nullptr;
3865 }
3866 auto Copy = MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {})
3867 .addReg(SrcReg, 0, SubReg);
3868 RBI.constrainGenericRegister(DstReg, *RC, MRI);
3869 return Copy;
3870}
3871
 3872bool AArch64InstructionSelector::selectMergeValues(
 3873 MachineInstr &I, MachineRegisterInfo &MRI) {
3874 assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode");
3875 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
3876 const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
3877 assert(!DstTy.isVector() && !SrcTy.isVector() && "invalid merge operation");
3878 const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
3879
3880 if (I.getNumOperands() != 3)
3881 return false;
3882
3883 // Merging 2 s64s into an s128.
3884 if (DstTy == LLT::scalar(128)) {
3885 if (SrcTy.getSizeInBits() != 64)
3886 return false;
3887 Register DstReg = I.getOperand(0).getReg();
3888 Register Src1Reg = I.getOperand(1).getReg();
3889 Register Src2Reg = I.getOperand(2).getReg();
3890 auto Tmp = MIB.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstTy}, {});
3891 MachineInstr *InsMI = emitLaneInsert(std::nullopt, Tmp.getReg(0), Src1Reg,
3892 /* LaneIdx */ 0, RB, MIB);
3893 if (!InsMI)
3894 return false;
3895 MachineInstr *Ins2MI = emitLaneInsert(DstReg, InsMI->getOperand(0).getReg(),
3896 Src2Reg, /* LaneIdx */ 1, RB, MIB);
3897 if (!Ins2MI)
3898 return false;
 3899 constrainSelectedInstRegOperands(*InsMI, TII, TRI, RBI);
 3900 constrainSelectedInstRegOperands(*Ins2MI, TII, TRI, RBI);
 3901 I.eraseFromParent();
3902 return true;
3903 }
3904
3905 if (RB.getID() != AArch64::GPRRegBankID)
3906 return false;
3907
3908 if (DstTy.getSizeInBits() != 64 || SrcTy.getSizeInBits() != 32)
3909 return false;
3910
3911 auto *DstRC = &AArch64::GPR64RegClass;
3912 Register SubToRegDef = MRI.createVirtualRegister(DstRC);
3913 MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
3914 TII.get(TargetOpcode::SUBREG_TO_REG))
3915 .addDef(SubToRegDef)
3916 .addImm(0)
3917 .addUse(I.getOperand(1).getReg())
3918 .addImm(AArch64::sub_32);
3919 Register SubToRegDef2 = MRI.createVirtualRegister(DstRC);
3920 // Need to anyext the second scalar before we can use bfm
3921 MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
3922 TII.get(TargetOpcode::SUBREG_TO_REG))
3923 .addDef(SubToRegDef2)
3924 .addImm(0)
3925 .addUse(I.getOperand(2).getReg())
3926 .addImm(AArch64::sub_32);
3927 MachineInstr &BFM =
3928 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::BFMXri))
3929 .addDef(I.getOperand(0).getReg())
3930 .addUse(SubToRegDef)
3931 .addUse(SubToRegDef2)
3932 .addImm(32)
3933 .addImm(31);
3934 constrainSelectedInstRegOperands(SubRegMI, TII, TRI, RBI);
3935 constrainSelectedInstRegOperands(SubRegMI2, TII, TRI, RBI);
 3936 constrainSelectedInstRegOperands(BFM, TII, TRI, RBI);
 3937 I.eraseFromParent();
3938 return true;
3939}
3940
3941static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
3942 const unsigned EltSize) {
3943 // Choose a lane copy opcode and subregister based off of the size of the
3944 // vector's elements.
3945 switch (EltSize) {
3946 case 8:
3947 CopyOpc = AArch64::DUPi8;
3948 ExtractSubReg = AArch64::bsub;
3949 break;
3950 case 16:
3951 CopyOpc = AArch64::DUPi16;
3952 ExtractSubReg = AArch64::hsub;
3953 break;
3954 case 32:
3955 CopyOpc = AArch64::DUPi32;
3956 ExtractSubReg = AArch64::ssub;
3957 break;
3958 case 64:
3959 CopyOpc = AArch64::DUPi64;
3960 ExtractSubReg = AArch64::dsub;
3961 break;
3962 default:
3963 // Unknown size, bail out.
3964 LLVM_DEBUG(dbgs() << "Elt size '" << EltSize << "' unsupported.\n");
3965 return false;
3966 }
3967 return true;
3968}
3969
3970MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
3971 std::optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
3972 Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
3973 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
3974 unsigned CopyOpc = 0;
3975 unsigned ExtractSubReg = 0;
3976 if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, ScalarTy.getSizeInBits())) {
3977 LLVM_DEBUG(
3978 dbgs() << "Couldn't determine lane copy opcode for instruction.\n");
3979 return nullptr;
3980 }
3981
3982 const TargetRegisterClass *DstRC =
3983 getRegClassForTypeOnBank(ScalarTy, DstRB, true);
3984 if (!DstRC) {
3985 LLVM_DEBUG(dbgs() << "Could not determine destination register class.\n");
3986 return nullptr;
3987 }
3988
3989 const RegisterBank &VecRB = *RBI.getRegBank(VecReg, MRI, TRI);
3990 const LLT &VecTy = MRI.getType(VecReg);
3991 const TargetRegisterClass *VecRC =
3992 getRegClassForTypeOnBank(VecTy, VecRB, true);
3993 if (!VecRC) {
3994 LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
3995 return nullptr;
3996 }
3997
3998 // The register that we're going to copy into.
3999 Register InsertReg = VecReg;
4000 if (!DstReg)
4001 DstReg = MRI.createVirtualRegister(DstRC);
4002 // If the lane index is 0, we just use a subregister COPY.
4003 if (LaneIdx == 0) {
4004 auto Copy = MIRBuilder.buildInstr(TargetOpcode::COPY, {*DstReg}, {})
4005 .addReg(VecReg, 0, ExtractSubReg);
4006 RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
4007 return &*Copy;
4008 }
4009
4010 // Lane copies require 128-bit wide registers. If we're dealing with an
4011 // unpacked vector, then we need to move up to that width. Insert an implicit
4012 // def and a subregister insert to get us there.
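 // E.g. to extract lane 1 of a 64-bit <2 x s32> vector, the source is first
 // widened to an FPR128 via IMPLICIT_DEF + INSERT_SUBREG, and DUPi32 then
 // copies out the requested lane.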
4013 if (VecTy.getSizeInBits() != 128) {
4014 MachineInstr *ScalarToVector = emitScalarToVector(
4015 VecTy.getSizeInBits(), &AArch64::FPR128RegClass, VecReg, MIRBuilder);
4016 if (!ScalarToVector)
4017 return nullptr;
4018 InsertReg = ScalarToVector->getOperand(0).getReg();
4019 }
4020
4021 MachineInstr *LaneCopyMI =
4022 MIRBuilder.buildInstr(CopyOpc, {*DstReg}, {InsertReg}).addImm(LaneIdx);
4023 constrainSelectedInstRegOperands(*LaneCopyMI, TII, TRI, RBI);
4024
4025 // Make sure that we actually constrain the initial copy.
4026 RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
4027 return LaneCopyMI;
4028}
4029
 4030bool AArch64InstructionSelector::selectExtractElt(
 4031 MachineInstr &I, MachineRegisterInfo &MRI) {
4032 assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT &&
4033 "unexpected opcode!");
4034 Register DstReg = I.getOperand(0).getReg();
4035 const LLT NarrowTy = MRI.getType(DstReg);
4036 const Register SrcReg = I.getOperand(1).getReg();
4037 const LLT WideTy = MRI.getType(SrcReg);
4038 (void)WideTy;
4039 assert(WideTy.getSizeInBits() >= NarrowTy.getSizeInBits() &&
4040 "source register size too small!");
4041 assert(!NarrowTy.isVector() && "cannot extract vector into vector!");
4042
4043 // Need the lane index to determine the correct copy opcode.
4044 MachineOperand &LaneIdxOp = I.getOperand(2);
4045 assert(LaneIdxOp.isReg() && "Lane index operand was not a register?");
4046
4047 if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
4048 LLVM_DEBUG(dbgs() << "Cannot extract into GPR.\n");
4049 return false;
4050 }
4051
4052 // Find the index to extract from.
4053 auto VRegAndVal = getIConstantVRegValWithLookThrough(LaneIdxOp.getReg(), MRI);
4054 if (!VRegAndVal)
4055 return false;
4056 unsigned LaneIdx = VRegAndVal->Value.getSExtValue();
4057
4058
4059 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
4060 MachineInstr *Extract = emitExtractVectorElt(DstReg, DstRB, NarrowTy, SrcReg,
4061 LaneIdx, MIB);
4062 if (!Extract)
4063 return false;
4064
4065 I.eraseFromParent();
4066 return true;
4067}
4068
 4069bool AArch64InstructionSelector::selectSplitVectorUnmerge(
 4070 MachineInstr &I, MachineRegisterInfo &MRI) {
4071 unsigned NumElts = I.getNumOperands() - 1;
4072 Register SrcReg = I.getOperand(NumElts).getReg();
4073 const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
4074 const LLT SrcTy = MRI.getType(SrcReg);
4075
4076 assert(NarrowTy.isVector() && "Expected an unmerge into vectors");
4077 if (SrcTy.getSizeInBits() > 128) {
4078 LLVM_DEBUG(dbgs() << "Unexpected vector type for vec split unmerge");
4079 return false;
4080 }
4081
4082 // We implement a split vector operation by treating the sub-vectors as
4083 // scalars and extracting them.
4084 const RegisterBank &DstRB =
4085 *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI);
4086 for (unsigned OpIdx = 0; OpIdx < NumElts; ++OpIdx) {
4087 Register Dst = I.getOperand(OpIdx).getReg();
4088 MachineInstr *Extract =
4089 emitExtractVectorElt(Dst, DstRB, NarrowTy, SrcReg, OpIdx, MIB);
4090 if (!Extract)
4091 return false;
4092 }
4093 I.eraseFromParent();
4094 return true;
4095}
4096
 4097bool AArch64InstructionSelector::selectUnmergeValues(MachineInstr &I,
 4098 MachineRegisterInfo &MRI) {
4099 assert(I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
4100 "unexpected opcode");
4101
4102 // TODO: Handle unmerging into GPRs and from scalars to scalars.
4103 if (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() !=
4104 AArch64::FPRRegBankID ||
4105 RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI)->getID() !=
4106 AArch64::FPRRegBankID) {
4107 LLVM_DEBUG(dbgs() << "Unmerging vector-to-gpr and scalar-to-scalar "
4108 "currently unsupported.\n");
4109 return false;
4110 }
4111
4112 // The last operand is the vector source register, and every other operand is
4113 // a register to unpack into.
4114 unsigned NumElts = I.getNumOperands() - 1;
4115 Register SrcReg = I.getOperand(NumElts).getReg();
4116 const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
4117 const LLT WideTy = MRI.getType(SrcReg);
4118 (void)WideTy;
4119 assert((WideTy.isVector() || WideTy.getSizeInBits() == 128) &&
4120 "can only unmerge from vector or s128 types!");
4121 assert(WideTy.getSizeInBits() > NarrowTy.getSizeInBits() &&
4122 "source register size too small!");
4123
4124 if (!NarrowTy.isScalar())
4125 return selectSplitVectorUnmerge(I, MRI);
4126
4127 // Choose a lane copy opcode and subregister based off of the size of the
4128 // vector's elements.
4129 unsigned CopyOpc = 0;
4130 unsigned ExtractSubReg = 0;
4131 if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, NarrowTy.getSizeInBits()))
4132 return false;
4133
4134 // Set up for the lane copies.
4135 MachineBasicBlock &MBB = *I.getParent();
4136
4137 // Stores the registers we'll be copying from.
4138 SmallVector<Register, 4> InsertRegs;
4139
4140 // We'll use the first register twice, so we only need NumElts-1 registers.
4141 unsigned NumInsertRegs = NumElts - 1;
4142
4143 // If our elements fit into exactly 128 bits, then we can copy from the source
4144 // directly. Otherwise, we need to do a bit of setup with some subregister
4145 // inserts.
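 // E.g. unmerging a <2 x s64> can read lanes straight from the 128-bit
 // source, whereas a 64-bit <2 x s32> source must first be widened with
 // subregister inserts.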
4146 if (NarrowTy.getSizeInBits() * NumElts == 128) {
4147 InsertRegs = SmallVector<Register, 4>(NumInsertRegs, SrcReg);
4148 } else {
4149 // No. We have to perform subregister inserts. For each insert, create an
4150 // implicit def and a subregister insert, and save the register we create.
4151 const TargetRegisterClass *RC = getRegClassForTypeOnBank(
4152 LLT::fixed_vector(NumElts, WideTy.getScalarSizeInBits()),
4153 *RBI.getRegBank(SrcReg, MRI, TRI));
4154 unsigned SubReg = 0;
4155 bool Found = getSubRegForClass(RC, TRI, SubReg);
4156 (void)Found;
 4157 assert(Found && "expected to find last operand's subreg idx");
4158 for (unsigned Idx = 0; Idx < NumInsertRegs; ++Idx) {
4159 Register ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
4160 MachineInstr &ImpDefMI =
4161 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF),
4162 ImpDefReg);
4163
4164 // Now, create the subregister insert from SrcReg.
4165 Register InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
4166 MachineInstr &InsMI =
4167 *BuildMI(MBB, I, I.getDebugLoc(),
4168 TII.get(TargetOpcode::INSERT_SUBREG), InsertReg)
4169 .addUse(ImpDefReg)
4170 .addUse(SrcReg)
4171 .addImm(SubReg);
4172
4173 constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI);
 4174 constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);
 4175
4176 // Save the register so that we can copy from it after.
4177 InsertRegs.push_back(InsertReg);
4178 }
4179 }
4180
4181 // Now that we've created any necessary subregister inserts, we can
4182 // create the copies.
4183 //
4184 // Perform the first copy separately as a subregister copy.
4185 Register CopyTo = I.getOperand(0).getReg();
4186 auto FirstCopy = MIB.buildInstr(TargetOpcode::COPY, {CopyTo}, {})
4187 .addReg(InsertRegs[0], 0, ExtractSubReg);
4188 constrainSelectedInstRegOperands(*FirstCopy, TII, TRI, RBI);
4189
4190 // Now, perform the remaining copies as vector lane copies.
4191 unsigned LaneIdx = 1;
4192 for (Register InsReg : InsertRegs) {
4193 Register CopyTo = I.getOperand(LaneIdx).getReg();
4194 MachineInstr &CopyInst =
4195 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), CopyTo)
4196 .addUse(InsReg)
4197 .addImm(LaneIdx);
4198 constrainSelectedInstRegOperands(CopyInst, TII, TRI, RBI);
4199 ++LaneIdx;
4200 }
4201
4202 // Separately constrain the first copy's destination. Because of the
4203 // limitation in constrainOperandRegClass, we can't guarantee that this will
4204 // actually be constrained. So, do it ourselves using the second operand.
4205 const TargetRegisterClass *RC =
4206 MRI.getRegClassOrNull(I.getOperand(1).getReg());
4207 if (!RC) {
4208 LLVM_DEBUG(dbgs() << "Couldn't constrain copy destination.\n");
4209 return false;
4210 }
4211
4212 RBI.constrainGenericRegister(CopyTo, *RC, MRI);
4213 I.eraseFromParent();
4214 return true;
4215}
4216
 4217bool AArch64InstructionSelector::selectConcatVectors(
 4218 MachineInstr &I, MachineRegisterInfo &MRI) {
4219 assert(I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
4220 "Unexpected opcode");
4221 Register Dst = I.getOperand(0).getReg();
4222 Register Op1 = I.getOperand(1).getReg();
4223 Register Op2 = I.getOperand(2).getReg();
4224 MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIB);
4225 if (!ConcatMI)
4226 return false;
4227 I.eraseFromParent();
4228 return true;
4229}
4230
4231unsigned
4232AArch64InstructionSelector::emitConstantPoolEntry(const Constant *CPVal,
4233 MachineFunction &MF) const {
4234 Type *CPTy = CPVal->getType();
4235 Align Alignment = MF.getDataLayout().getPrefTypeAlign(CPTy);
4236
 4237 MachineConstantPool *MCP = MF.getConstantPool();
 4238 return MCP->getConstantPoolIndex(CPVal, Alignment);
4239}
4240
4241MachineInstr *AArch64InstructionSelector::emitLoadFromConstantPool(
4242 const Constant *CPVal, MachineIRBuilder &MIRBuilder) const {
4243 const TargetRegisterClass *RC;
4244 unsigned Opc;
4245 bool IsTiny = TM.getCodeModel() == CodeModel::Tiny;
4246 unsigned Size = MIRBuilder.getDataLayout().getTypeStoreSize(CPVal->getType());
4247 switch (Size) {
4248 case 16:
4249 RC = &AArch64::FPR128RegClass;
4250 Opc = IsTiny ? AArch64::LDRQl : AArch64::LDRQui;
4251 break;
4252 case 8:
4253 RC = &AArch64::FPR64RegClass;
4254 Opc = IsTiny ? AArch64::LDRDl : AArch64::LDRDui;
4255 break;
4256 case 4:
4257 RC = &AArch64::FPR32RegClass;
4258 Opc = IsTiny ? AArch64::LDRSl : AArch64::LDRSui;
4259 break;
4260 case 2:
4261 RC = &AArch64::FPR16RegClass;
4262 Opc = AArch64::LDRHui;
4263 break;
4264 default:
4265 LLVM_DEBUG(dbgs() << "Could not load from constant pool of type "
4266 << *CPVal->getType());
4267 return nullptr;
4268 }
4269
4270 MachineInstr *LoadMI = nullptr;
4271 auto &MF = MIRBuilder.getMF();
4272 unsigned CPIdx = emitConstantPoolEntry(CPVal, MF);
4273 if (IsTiny && (Size == 16 || Size == 8 || Size == 4)) {
4274 // Use load(literal) for tiny code model.
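 // i.e. a single PC-relative LDR (literal) from the constant pool rather
 // than the ADRP + LDR pair built below.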
4275 LoadMI = &*MIRBuilder.buildInstr(Opc, {RC}, {}).addConstantPoolIndex(CPIdx);
4276 } else {
4277 auto Adrp =
4278 MIRBuilder.buildInstr(AArch64::ADRP, {&AArch64::GPR64RegClass}, {})
4279 .addConstantPoolIndex(CPIdx, 0, AArch64II::MO_PAGE);
4280
4281 LoadMI = &*MIRBuilder.buildInstr(Opc, {RC}, {Adrp})
 4282 .addConstantPoolIndex(
 4283 CPIdx, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
 4284
 4285 constrainSelectedInstRegOperands(*Adrp, TII, TRI, RBI);
 4286 }
 4287
 4288 MachinePointerInfo PtrInfo = MachinePointerInfo::getConstantPool(MF);
 4289 LoadMI->addMemOperand(MF, MF.getMachineMemOperand(PtrInfo,
 4290 MachineMemOperand::MOLoad,
 4291 Size, Align(Size)));
 4292 constrainSelectedInstRegOperands(*LoadMI, TII, TRI, RBI);
 4293 return LoadMI;
4294}
4295
 4296/// Return an <Opcode, SubregIndex> pair to do a vector elt insert of a given
4297/// size and RB.
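/// E.g. a 32-bit element on the GPR bank yields {INSvi32gpr, ssub}, while a
/// 16-bit element on the FPR bank yields {INSvi16lane, hsub}.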
4298static std::pair<unsigned, unsigned>
4299getInsertVecEltOpInfo(const RegisterBank &RB, unsigned EltSize) {
4300 unsigned Opc, SubregIdx;
4301 if (RB.getID() == AArch64::GPRRegBankID) {
4302 if (EltSize == 8) {
4303 Opc = AArch64::INSvi8gpr;
4304 SubregIdx = AArch64::bsub;
4305 } else if (EltSize == 16) {
4306 Opc = AArch64::INSvi16gpr;
4307 SubregIdx = AArch64::ssub;
4308 } else if (EltSize == 32) {
4309 Opc = AArch64::INSvi32gpr;
4310 SubregIdx = AArch64::ssub;
4311 } else if (EltSize == 64) {
4312 Opc = AArch64::INSvi64gpr;
4313 SubregIdx = AArch64::dsub;
4314 } else {
4315 llvm_unreachable("invalid elt size!");
4316 }
4317 } else {
4318 if (EltSize == 8) {
4319 Opc = AArch64::INSvi8lane;
4320 SubregIdx = AArch64::bsub;
4321 } else if (EltSize == 16) {
4322 Opc = AArch64::INSvi16lane;
4323 SubregIdx = AArch64::hsub;
4324 } else if (EltSize == 32) {
4325 Opc = AArch64::INSvi32lane;
4326 SubregIdx = AArch64::ssub;
4327 } else if (EltSize == 64) {
4328 Opc = AArch64::INSvi64lane;
4329 SubregIdx = AArch64::dsub;
4330 } else {
4331 llvm_unreachable("invalid elt size!");
4332 }
4333 }
4334 return std::make_pair(Opc, SubregIdx);
4335}
4336
4337MachineInstr *AArch64InstructionSelector::emitInstr(
4338 unsigned Opcode, std::initializer_list<llvm::DstOp> DstOps,
4339 std::initializer_list<llvm::SrcOp> SrcOps, MachineIRBuilder &MIRBuilder,
4340 const ComplexRendererFns &RenderFns) const {
4341 assert(Opcode && "Expected an opcode?");
4342 assert(!isPreISelGenericOpcode(Opcode) &&
4343 "Function should only be used to produce selected instructions!");
4344 auto MI = MIRBuilder.buildInstr(Opcode, DstOps, SrcOps);
4345 if (RenderFns)
4346 for (auto &Fn : *RenderFns)
4347 Fn(MI);
 4348 constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
 4349 return &*MI;
4350}
4351
4352MachineInstr *AArch64InstructionSelector::emitAddSub(
4353 const std::array<std::array<unsigned, 2>, 5> &AddrModeAndSizeToOpcode,
4354 Register Dst, MachineOperand &LHS, MachineOperand &RHS,
4355 MachineIRBuilder &MIRBuilder) const {
4356 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
4357 assert(LHS.isReg() && RHS.isReg() && "Expected register operands?");
4358 auto Ty = MRI.getType(LHS.getReg());
4359 assert(!Ty.isVector() && "Expected a scalar or pointer?");
4360 unsigned Size = Ty.getSizeInBits();
4361 assert((Size == 32 || Size == 64) && "Expected a 32-bit or 64-bit type only");
4362 bool Is32Bit = Size == 32;
4363
4364 // INSTRri form with positive arithmetic immediate.
4365 if (auto Fns = selectArithImmed(RHS))
4366 return emitInstr(AddrModeAndSizeToOpcode[0][Is32Bit], {Dst}, {LHS},
4367 MIRBuilder, Fns);
4368
4369 // INSTRri form with negative arithmetic immediate.
4370 if (auto Fns = selectNegArithImmed(RHS))
4371 return emitInstr(AddrModeAndSizeToOpcode[3][Is32Bit], {Dst}, {LHS},
4372 MIRBuilder, Fns);
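 // (E.g. an add of -5 is emitted as the corresponding SUB instruction with an
 // immediate of 5, via slot [3] of the opcode table.)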
4373
4374 // INSTRrx form.
4375 if (auto Fns = selectArithExtendedRegister(RHS))
4376 return emitInstr(AddrModeAndSizeToOpcode[4][Is32Bit], {Dst}, {LHS},
4377 MIRBuilder, Fns);
4378
4379 // INSTRrs form.
4380 if (auto Fns = selectShiftedRegister(RHS))
4381 return emitInstr(AddrModeAndSizeToOpcode[1][Is32Bit], {Dst}, {LHS},
4382 MIRBuilder, Fns);
4383 return emitInstr(AddrModeAndSizeToOpcode[2][Is32Bit], {Dst}, {LHS, RHS},
4384 MIRBuilder);
4385}
4386
 4387MachineInstr *
 4388AArch64InstructionSelector::emitADD(Register DefReg, MachineOperand &LHS,
4389 MachineOperand &RHS,
4390 MachineIRBuilder &MIRBuilder) const {
4391 const std::array<std::array<unsigned, 2>, 5> OpcTable{
4392 {{AArch64::ADDXri, AArch64::ADDWri},
4393 {AArch64::ADDXrs, AArch64::ADDWrs},
4394 {AArch64::ADDXrr, AArch64::ADDWrr},
4395 {AArch64::SUBXri, AArch64::SUBWri},
4396 {AArch64::ADDXrx, AArch64::ADDWrx}}};
4397 return emitAddSub(OpcTable, DefReg, LHS, RHS, MIRBuilder);
4398}
4399
 4400MachineInstr *
 4401AArch64InstructionSelector::emitADDS(Register Dst, MachineOperand &LHS,
4402 MachineOperand &RHS,
4403 MachineIRBuilder &MIRBuilder) const {
4404 const std::array<std::array<unsigned, 2>, 5> OpcTable{
4405 {{AArch64::ADDSXri, AArch64::ADDSWri},
4406 {AArch64::ADDSXrs, AArch64::ADDSWrs},
4407 {AArch64::ADDSXrr, AArch64::ADDSWrr},
4408 {AArch64::SUBSXri, AArch64::SUBSWri},
4409 {AArch64::ADDSXrx, AArch64::ADDSWrx}}};
4410 return emitAddSub(OpcTable, Dst, LHS, RHS, MIRBuilder);
4411}
4412
 4413MachineInstr *
 4414AArch64InstructionSelector::emitSUBS(Register Dst, MachineOperand &LHS,
4415 MachineOperand &RHS,
4416 MachineIRBuilder &MIRBuilder) const {
4417 const std::array<std::array<unsigned, 2>, 5> OpcTable{
4418 {{AArch64::SUBSXri, AArch64::SUBSWri},
4419 {AArch64::SUBSXrs, AArch64::SUBSWrs},
4420 {AArch64::SUBSXrr, AArch64::SUBSWrr},
4421 {AArch64::ADDSXri, AArch64::ADDSWri},
4422 {AArch64::SUBSXrx, AArch64::SUBSWrx}}};
4423 return emitAddSub(OpcTable, Dst, LHS, RHS, MIRBuilder);
4424}
4425
 4426MachineInstr *
 4427AArch64InstructionSelector::emitADCS(Register Dst, MachineOperand &LHS,
4428 MachineOperand &RHS,
4429 MachineIRBuilder &MIRBuilder) const {
4430 assert(LHS.isReg() && RHS.isReg() && "Expected register operands?");
4431 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
4432 bool Is32Bit = (MRI->getType(LHS.getReg()).getSizeInBits() == 32);
4433 static const unsigned OpcTable[2] = {AArch64::ADCSXr, AArch64::ADCSWr};
4434 return emitInstr(OpcTable[Is32Bit], {Dst}, {LHS, RHS}, MIRBuilder);
4435}
4436
 4437MachineInstr *
 4438AArch64InstructionSelector::emitSBCS(Register Dst, MachineOperand &LHS,
4439 MachineOperand &RHS,
4440 MachineIRBuilder &MIRBuilder) const {
4441 assert(LHS.isReg() && RHS.isReg() && "Expected register operands?");
4442 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
4443 bool Is32Bit = (MRI->getType(LHS.getReg()).getSizeInBits() == 32);
4444 static const unsigned OpcTable[2] = {AArch64::SBCSXr, AArch64::SBCSWr};
4445 return emitInstr(OpcTable[Is32Bit], {Dst}, {LHS, RHS}, MIRBuilder);
4446}
4447
 4448MachineInstr *
 4449AArch64InstructionSelector::emitCMN(MachineOperand &LHS, MachineOperand &RHS,
4450 MachineIRBuilder &MIRBuilder) const {
4451 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
4452 bool Is32Bit = (MRI.getType(LHS.getReg()).getSizeInBits() == 32);
4453 auto RC = Is32Bit ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass;
4454 return emitADDS(MRI.createVirtualRegister(RC), LHS, RHS, MIRBuilder);
4455}
4456
 4457MachineInstr *
 4458AArch64InstructionSelector::emitTST(MachineOperand &LHS, MachineOperand &RHS,
4459 MachineIRBuilder &MIRBuilder) const {
4460 assert(LHS.isReg() && RHS.isReg() && "Expected register operands?");
4461 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
4462 LLT Ty = MRI.getType(LHS.getReg());
4463 unsigned RegSize = Ty.getSizeInBits();
4464 bool Is32Bit = (RegSize == 32);
4465 const unsigned OpcTable[3][2] = {{AArch64::ANDSXri, AArch64::ANDSWri},
4466 {AArch64::ANDSXrs, AArch64::ANDSWrs},
4467 {AArch64::ANDSXrr, AArch64::ANDSWrr}};
4468 // ANDS needs a logical immediate for its immediate form. Check if we can
4469 // fold one in.
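 // E.g. a test against 0xff encodes as ANDSWri, whereas 0x12345 is not a
 // valid logical immediate and falls back to the register-register form.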
4470 if (auto ValAndVReg = getIConstantVRegValWithLookThrough(RHS.getReg(), MRI)) {
4471 int64_t Imm = ValAndVReg->Value.getSExtValue();
4472
 4473 if (AArch64_AM::isLogicalImmediate(Imm, RegSize)) {
 4474 auto TstMI = MIRBuilder.buildInstr(OpcTable[0][Is32Bit], {Ty}, {LHS});
 4475 TstMI.addImm(AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
 4476 constrainSelectedInstRegOperands(*TstMI, TII, TRI, RBI);
 4477 return &*TstMI;
4478 }
4479 }
4480
4481 if (auto Fns = selectLogicalShiftedRegister(RHS))
4482 return emitInstr(OpcTable[1][Is32Bit], {Ty}, {LHS}, MIRBuilder, Fns);
4483 return emitInstr(OpcTable[2][Is32Bit], {Ty}, {LHS, RHS}, MIRBuilder);
4484}
4485
4486MachineInstr *AArch64InstructionSelector::emitIntegerCompare(
4487 MachineOperand &LHS, MachineOperand &RHS, MachineOperand &Predicate,
4488 MachineIRBuilder &MIRBuilder) const {
4489 assert(LHS.isReg() && RHS.isReg() && "Expected LHS and RHS to be registers!");
4490 assert(Predicate.isPredicate() && "Expected predicate?");
4491 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
4492 LLT CmpTy = MRI.getType(LHS.getReg());
4493 assert(!CmpTy.isVector() && "Expected scalar or pointer");
4494 unsigned Size = CmpTy.getSizeInBits();
4495 (void)Size;
4496 assert((Size == 32 || Size == 64) && "Expected a 32-bit or 64-bit LHS/RHS?");
4497 // Fold the compare into a cmn or tst if possible.
4498 if (auto FoldCmp = tryFoldIntegerCompare(LHS, RHS, Predicate, MIRBuilder))
4499 return FoldCmp;
4500 auto Dst = MRI.cloneVirtualRegister(LHS.getReg());
4501 return emitSUBS(Dst, LHS, RHS, MIRBuilder);
4502}
4503
4504MachineInstr *AArch64InstructionSelector::emitCSetForFCmp(
4505 Register Dst, CmpInst::Predicate Pred, MachineIRBuilder &MIRBuilder) const {
4506 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
4507#ifndef NDEBUG
4508 LLT Ty = MRI.getType(Dst);
4509 assert(!Ty.isVector() && Ty.getSizeInBits() == 32 &&
4510 "Expected a 32-bit scalar register?");
4511#endif
4512 const Register ZReg = AArch64::WZR;
4513 AArch64CC::CondCode CC1, CC2;
4514 changeFCMPPredToAArch64CC(Pred, CC1, CC2);
4515 auto InvCC1 = AArch64CC::getInvertedCondCode(CC1);
4516 if (CC2 == AArch64CC::AL)
4517 return emitCSINC(/*Dst=*/Dst, /*Src1=*/ZReg, /*Src2=*/ZReg, InvCC1,
4518 MIRBuilder);
4519 const TargetRegisterClass *RC = &AArch64::GPR32RegClass;
4520 Register Def1Reg = MRI.createVirtualRegister(RC);
4521 Register Def2Reg = MRI.createVirtualRegister(RC);
4522 auto InvCC2 = AArch64CC::getInvertedCondCode(CC2);
4523 emitCSINC(/*Dst=*/Def1Reg, /*Src1=*/ZReg, /*Src2=*/ZReg, InvCC1, MIRBuilder);
4524 emitCSINC(/*Dst=*/Def2Reg, /*Src1=*/ZReg, /*Src2=*/ZReg, InvCC2, MIRBuilder);
4525 auto OrMI = MIRBuilder.buildInstr(AArch64::ORRWrr, {Dst}, {Def1Reg, Def2Reg});
 4526 constrainSelectedInstRegOperands(*OrMI, TII, TRI, RBI);
 4527 return &*OrMI;
4528}
4529
4530MachineInstr *AArch64InstructionSelector::emitFPCompare(
4531 Register LHS, Register RHS, MachineIRBuilder &MIRBuilder,
4532 std::optional<CmpInst::Predicate> Pred) const {
4533 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
4534 LLT Ty = MRI.getType(LHS);
4535 if (Ty.isVector())
4536 return nullptr;
4537 unsigned OpSize = Ty.getSizeInBits();
4538 assert(OpSize == 16 || OpSize == 32 || OpSize == 64);
4539
4540 // If this is a compare against +0.0, then we don't have
4541 // to explicitly materialize a constant.
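 // E.g. a compare against +0.0 uses FCMPSri/FCMPDri, which implicitly
 // compare their operand against zero.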
4542 const ConstantFP *FPImm = getConstantFPVRegVal(RHS, MRI);
4543 bool ShouldUseImm = FPImm && (FPImm->isZero() && !FPImm->isNegative());
4544
4545 auto IsEqualityPred = [](CmpInst::Predicate P) {
4546 return P == CmpInst::FCMP_OEQ || P == CmpInst::FCMP_ONE ||
 4547 P == CmpInst::FCMP_UEQ || P == CmpInst::FCMP_UNE;
 4548 };
4549 if (!ShouldUseImm && Pred && IsEqualityPred(*Pred)) {
4550 // Try commutating the operands.
4551 const ConstantFP *LHSImm = getConstantFPVRegVal(LHS, MRI);
4552 if (LHSImm && (LHSImm->isZero() && !LHSImm->isNegative())) {
4553 ShouldUseImm = true;
4554 std::swap(LHS, RHS);
4555 }
4556 }
4557 unsigned CmpOpcTbl[2][3] = {
4558 {AArch64::FCMPHrr, AArch64::FCMPSrr, AArch64::FCMPDrr},
4559 {AArch64::FCMPHri, AArch64::FCMPSri, AArch64::FCMPDri}};
4560 unsigned CmpOpc =
4561 CmpOpcTbl[ShouldUseImm][OpSize == 16 ? 0 : (OpSize == 32 ? 1 : 2)];
4562
4563 // Partially build the compare. Decide if we need to add a use for the
4564 // third operand based off whether or not we're comparing against 0.0.
4565 auto CmpMI = MIRBuilder.buildInstr(CmpOpc).addUse(LHS);
 4566 CmpMI.setMIFlags(MachineInstr::NoFPExcept);
 4567 if (!ShouldUseImm)
 4568 CmpMI.addUse(RHS);
 4569 constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI);
 4570 return &*CmpMI;
4571}
4572
4573MachineInstr *AArch64InstructionSelector::emitVectorConcat(
4574 std::optional<Register> Dst, Register Op1, Register Op2,
4575 MachineIRBuilder &MIRBuilder) const {
4576 // We implement a vector concat by:
4577 // 1. Use scalar_to_vector to insert the lower vector into the larger dest
4578 // 2. Insert the upper vector into the destination's upper element
4579 // TODO: some of this code is common with G_BUILD_VECTOR handling.
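 // E.g. concatenating two <2 x s32> operands widens the first into an
 // FPR128, then inserts the second into the upper 64-bit lane to form the
 // <4 x s32> result.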
4580 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
4581
4582 const LLT Op1Ty = MRI.getType(Op1);
4583 const LLT Op2Ty = MRI.getType(Op2);
4584
4585 if (Op1Ty != Op2Ty) {
4586 LLVM_DEBUG(dbgs() << "Could not do vector concat of differing vector tys");
4587 return nullptr;
4588 }
4589 assert(Op1Ty.isVector() && "Expected a vector for vector concat");
4590
4591 if (Op1Ty.getSizeInBits() >= 128) {
4592 LLVM_DEBUG(dbgs() << "Vector concat not supported for full size vectors");
4593 return nullptr;
4594 }
4595
4596 // At the moment we just support 64 bit vector concats.
4597 if (Op1Ty.getSizeInBits() != 64) {
 4598 LLVM_DEBUG(dbgs() << "Vector concat only supported for 64b vectors");
4599 return nullptr;
4600 }
4601
4602 const LLT ScalarTy = LLT::scalar(Op1Ty.getSizeInBits());
4603 const RegisterBank &FPRBank = *RBI.getRegBank(Op1, MRI, TRI);
4604 const TargetRegisterClass *DstRC =
4605 getRegClassForTypeOnBank(Op1Ty.multiplyElements(2), FPRBank);
4606
4607 MachineInstr *WidenedOp1 =
4608 emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op1, MIRBuilder);
4609 MachineInstr *WidenedOp2 =
4610 emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op2, MIRBuilder);
4611 if (!WidenedOp1 || !WidenedOp2) {
4612 LLVM_DEBUG(dbgs() << "Could not emit a vector from scalar value");
4613 return nullptr;
4614 }
4615
4616 // Now do the insert of the upper element.
4617 unsigned InsertOpc, InsSubRegIdx;
4618 std::tie(InsertOpc, InsSubRegIdx) =
4619 getInsertVecEltOpInfo(FPRBank, ScalarTy.getSizeInBits());
4620
4621 if (!Dst)
4622 Dst = MRI.createVirtualRegister(DstRC);
4623 auto InsElt =
4624 MIRBuilder
4625 .buildInstr(InsertOpc, {*Dst}, {WidenedOp1->getOperand(0).getReg()})
4626 .addImm(1) /* Lane index */
4627 .addUse(WidenedOp2->getOperand(0).getReg())
4628 .addImm(0);
 4629 constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI);
 4630 return &*InsElt;
4631}
4632
 4633MachineInstr *
 4634AArch64InstructionSelector::emitCSINC(Register Dst, Register Src1,
4635 Register Src2, AArch64CC::CondCode Pred,
4636 MachineIRBuilder &MIRBuilder) const {
4637 auto &MRI = *MIRBuilder.getMRI();
4638 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Dst);
4639 // If we used a register class, then this won't necessarily have an LLT.
4640 // Compute the size based off whether or not we have a class or bank.
4641 unsigned Size;
4642 if (const auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
4643 Size = TRI.getRegSizeInBits(*RC);
4644 else
4645 Size = MRI.getType(Dst).getSizeInBits();
4646 // Some opcodes use s1.
4647 assert(Size <= 64 && "Expected 64 bits or less only!");
4648 static const unsigned OpcTable[2] = {AArch64::CSINCWr, AArch64::CSINCXr};
4649 unsigned Opc = OpcTable[Size == 64];
4650 auto CSINC = MIRBuilder.buildInstr(Opc, {Dst}, {Src1, Src2}).addImm(Pred);
 4651 constrainSelectedInstRegOperands(*CSINC, TII, TRI, RBI);
 4652 return &*CSINC;
4653}
4654
4655MachineInstr *AArch64InstructionSelector::emitCarryIn(MachineInstr &I,
4656 Register CarryReg) {
 4657 MachineRegisterInfo *MRI = MIB.getMRI();
 4658 unsigned Opcode = I.getOpcode();
4659
4660 // If the instruction is a SUB, we need to negate the carry,
4661 // because borrowing is indicated by carry-flag == 0.
4662 bool NeedsNegatedCarry =
4663 (Opcode == TargetOpcode::G_USUBE || Opcode == TargetOpcode::G_SSUBE);
4664
4665 // If the previous instruction will already produce the correct carry, do not
4666 // emit a carry generating instruction. E.g. for G_UADDE/G_USUBE sequences
4667 // generated during legalization of wide add/sub. This optimization depends on
4668 // these sequences not being interrupted by other instructions.
4669 // We have to select the previous instruction before the carry-using
4670 // instruction is deleted by the calling function, otherwise the previous
4671 // instruction might become dead and would get deleted.
4672 MachineInstr *SrcMI = MRI->getVRegDef(CarryReg);
4673 if (SrcMI == I.getPrevNode()) {
4674 if (auto *CarrySrcMI = dyn_cast<GAddSubCarryOut>(SrcMI)) {
4675 bool ProducesNegatedCarry = CarrySrcMI->isSub();
4676 if (NeedsNegatedCarry == ProducesNegatedCarry &&
4677 CarrySrcMI->isUnsigned() &&
4678 CarrySrcMI->getCarryOutReg() == CarryReg &&
4679 selectAndRestoreState(*SrcMI))
4680 return nullptr;
4681 }
4682 }
4683
4684 Register DeadReg = MRI->createVirtualRegister(&AArch64::GPR32RegClass);
4685
4686 if (NeedsNegatedCarry) {
4687 // (0 - Carry) sets !C in NZCV when Carry == 1
4688 Register ZReg = AArch64::WZR;
4689 return emitInstr(AArch64::SUBSWrr, {DeadReg}, {ZReg, CarryReg}, MIB);
4690 }
4691
4692 // (Carry - 1) sets !C in NZCV when Carry == 0
4693 auto Fns = select12BitValueWithLeftShift(1);
4694 return emitInstr(AArch64::SUBSWri, {DeadReg}, {CarryReg}, MIB, Fns);
4695}
4696
4697bool AArch64InstructionSelector::selectOverflowOp(MachineInstr &I,
 4698 MachineRegisterInfo &MRI) {
 4699 auto &CarryMI = cast<GAddSubCarryOut>(I);
4700
4701 if (auto *CarryInMI = dyn_cast<GAddSubCarryInOut>(&I)) {
4702 // Set NZCV carry according to carry-in VReg
4703 emitCarryIn(I, CarryInMI->getCarryInReg());
4704 }
4705
4706 // Emit the operation and get the correct condition code.
4707 auto OpAndCC = emitOverflowOp(I.getOpcode(), CarryMI.getDstReg(),
4708 CarryMI.getLHS(), CarryMI.getRHS(), MIB);
4709
4710 Register CarryOutReg = CarryMI.getCarryOutReg();
4711
4712 // Don't convert carry-out to VReg if it is never used
4713 if (!MRI.use_nodbg_empty(CarryOutReg)) {
4714 // Now, put the overflow result in the register given by the first operand