1//===- ARMFastISel.cpp - ARM FastISel implementation ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the ARM-specific support for the FastISel class. Some
10// of the target-specific code is generated by tablegen in the file
11// ARMGenFastISel.inc, which is #included here.
12//
13//===----------------------------------------------------------------------===//
14
15#include "ARM.h"
16#include "ARMBaseInstrInfo.h"
17#include "ARMBaseRegisterInfo.h"
18#include "ARMCallingConv.h"
20#include "ARMISelLowering.h"
22#include "ARMSubtarget.h"
25#include "Utils/ARMBaseInfo.h"
26#include "llvm/ADT/APFloat.h"
27#include "llvm/ADT/APInt.h"
28#include "llvm/ADT/DenseMap.h"
50#include "llvm/IR/Argument.h"
51#include "llvm/IR/Attributes.h"
52#include "llvm/IR/CallingConv.h"
53#include "llvm/IR/Constant.h"
54#include "llvm/IR/Constants.h"
55#include "llvm/IR/DataLayout.h"
57#include "llvm/IR/Function.h"
59#include "llvm/IR/GlobalValue.h"
61#include "llvm/IR/InstrTypes.h"
62#include "llvm/IR/Instruction.h"
65#include "llvm/IR/Intrinsics.h"
66#include "llvm/IR/Module.h"
67#include "llvm/IR/Operator.h"
68#include "llvm/IR/Type.h"
69#include "llvm/IR/User.h"
70#include "llvm/IR/Value.h"
71#include "llvm/MC/MCInstrDesc.h"
79#include <cassert>
80#include <cstdint>
81#include <utility>
82
83using namespace llvm;
84
85namespace {
86
87 // All possible address modes, plus some.
88 struct Address {
89 enum {
90 RegBase,
91 FrameIndexBase
92 } BaseType = RegBase;
93
94 union {
95 unsigned Reg;
96 int FI;
97 } Base;
98
99 int Offset = 0;
100
101 // Innocuous defaults for our address.
102 Address() {
103 Base.Reg = 0;
104 }
105 };
106
107class ARMFastISel final : public FastISel {
108 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
109 /// make the right decision when generating code for different targets.
110 const ARMSubtarget *Subtarget;
111 Module &M;
112 const TargetMachine &TM;
113 const TargetInstrInfo &TII;
114 const TargetLowering &TLI;
115 ARMFunctionInfo *AFI;
116
117 // Convenience variables to avoid some queries.
118 bool isThumb2;
119 LLVMContext *Context;
120
121 public:
122 explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
123 const TargetLibraryInfo *libInfo)
124 : FastISel(funcInfo, libInfo),
125 Subtarget(&funcInfo.MF->getSubtarget<ARMSubtarget>()),
126 M(const_cast<Module &>(*funcInfo.Fn->getParent())),
127 TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
128 TLI(*Subtarget->getTargetLowering()) {
129 AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
130 isThumb2 = AFI->isThumbFunction();
131 Context = &funcInfo.Fn->getContext();
132 }
133
134 private:
135 // Code from FastISel.cpp.
136
137 unsigned fastEmitInst_r(unsigned MachineInstOpcode,
138 const TargetRegisterClass *RC, unsigned Op0);
139 unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
140 const TargetRegisterClass *RC,
141 unsigned Op0, unsigned Op1);
142 unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
143 const TargetRegisterClass *RC,
144 unsigned Op0, uint64_t Imm);
145 unsigned fastEmitInst_i(unsigned MachineInstOpcode,
146 const TargetRegisterClass *RC,
147 uint64_t Imm);
148
149 // Backend specific FastISel code.
150
151 bool fastSelectInstruction(const Instruction *I) override;
152 unsigned fastMaterializeConstant(const Constant *C) override;
153 unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
154 bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
155 const LoadInst *LI) override;
156 bool fastLowerArguments() override;
157
158#include "ARMGenFastISel.inc"
159
160 // Instruction selection routines.
161
162 bool SelectLoad(const Instruction *I);
163 bool SelectStore(const Instruction *I);
164 bool SelectBranch(const Instruction *I);
165 bool SelectIndirectBr(const Instruction *I);
166 bool SelectCmp(const Instruction *I);
167 bool SelectFPExt(const Instruction *I);
168 bool SelectFPTrunc(const Instruction *I);
169 bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
170 bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
171 bool SelectIToFP(const Instruction *I, bool isSigned);
172 bool SelectFPToI(const Instruction *I, bool isSigned);
173 bool SelectDiv(const Instruction *I, bool isSigned);
174 bool SelectRem(const Instruction *I, bool isSigned);
175 bool SelectCall(const Instruction *I, const char *IntrMemName);
176 bool SelectIntrinsicCall(const IntrinsicInst &I);
177 bool SelectSelect(const Instruction *I);
178 bool SelectRet(const Instruction *I);
179 bool SelectTrunc(const Instruction *I);
180 bool SelectIntExt(const Instruction *I);
181 bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);
182
183 // Utility routines.
184
185 bool isPositionIndependent() const;
186 bool isTypeLegal(Type *Ty, MVT &VT);
187 bool isLoadTypeLegal(Type *Ty, MVT &VT);
188 bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
189 bool isZExt);
190 bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
191 MaybeAlign Alignment = std::nullopt, bool isZExt = true,
192 bool allocReg = true);
193 bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
194 MaybeAlign Alignment = std::nullopt);
195 bool ARMComputeAddress(const Value *Obj, Address &Addr);
196 void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
197 bool ARMIsMemCpySmall(uint64_t Len);
198 bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
199 MaybeAlign Alignment);
200 unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
201 unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
202 unsigned ARMMaterializeInt(const Constant *C, MVT VT);
203 unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
204 unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
205 unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
206 unsigned ARMSelectCallOp(bool UseReg);
207 unsigned ARMLowerPICELF(const GlobalValue *GV, MVT VT);
208
209 const TargetLowering *getTargetLowering() { return &TLI; }
210
211 // Call handling routines.
212
213 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
214 bool Return,
215 bool isVarArg);
216 bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
 217 SmallVectorImpl<Register> &ArgRegs,
 218 SmallVectorImpl<MVT> &ArgVTs,
 219 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
 220 SmallVectorImpl<Register> &RegArgs,
 221 CallingConv::ID CC,
 222 unsigned &NumBytes,
223 bool isVarArg);
224 unsigned getLibcallReg(const Twine &Name);
225 bool FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
 226 const Instruction *I, CallingConv::ID CC,
 227 unsigned &NumBytes, bool isVarArg);
228 bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);
229
230 // OptionalDef handling routines.
231
232 bool isARMNEONPred(const MachineInstr *MI);
233 bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
234 const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
235 void AddLoadStoreOperands(MVT VT, Address &Addr,
236 const MachineInstrBuilder &MIB,
237 MachineMemOperand::Flags Flags, bool useAM3);
238};
239
240} // end anonymous namespace
241
242// DefinesOptionalPredicate - This is different from DefinesPredicate in that
243// we don't care about implicit defs here, just places we'll need to add a
244// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
245bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
246 if (!MI->hasOptionalDef())
247 return false;
248
249 // Look to see if our OptionalDef is defining CPSR or CCR.
250 for (const MachineOperand &MO : MI->operands()) {
251 if (!MO.isReg() || !MO.isDef()) continue;
252 if (MO.getReg() == ARM::CPSR)
253 *CPSR = true;
254 }
255 return true;
256}
257
258bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
259 const MCInstrDesc &MCID = MI->getDesc();
260
261 // If we're a thumb2 or not NEON function we'll be handled via isPredicable.
 262 if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
 263 AFI->isThumb2Function())
264 return MI->isPredicable();
265
266 for (const MCOperandInfo &opInfo : MCID.operands())
267 if (opInfo.isPredicate())
268 return true;
269
270 return false;
271}
272
273// If the machine is predicable go ahead and add the predicate operands, if
274// it needs default CC operands add those.
275// TODO: If we want to support thumb1 then we'll need to deal with optional
276// CPSR defs that need to be added before the remaining operands. See s_cc_out
277// for descriptions why.
 278 const MachineInstrBuilder &
 279 ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
280 MachineInstr *MI = &*MIB;
281
282 // Do we use a predicate? or...
283 // Are we NEON in ARM mode and have a predicate operand? If so, I know
284 // we're not predicable but add it anyways.
285 if (isARMNEONPred(MI))
286 MIB.add(predOps(ARMCC::AL));
287
288 // Do we optionally set a predicate? Preds is size > 0 iff the predicate
289 // defines CPSR. All other OptionalDefines in ARM are the CCR register.
290 bool CPSR = false;
291 if (DefinesOptionalPredicate(MI, &CPSR))
292 MIB.add(CPSR ? t1CondCodeOp() : condCodeOp());
293 return MIB;
294}
295
296unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
297 const TargetRegisterClass *RC,
298 unsigned Op0) {
299 Register ResultReg = createResultReg(RC);
300 const MCInstrDesc &II = TII.get(MachineInstOpcode);
301
302 // Make sure the input operand is sufficiently constrained to be legal
303 // for this instruction.
304 Op0 = constrainOperandRegClass(II, Op0, 1);
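 // If the instruction has an explicit def, emit straight into ResultReg;
 // otherwise its only def is implicit (e.g. a status register), so emit the
 // instruction first and then copy the implicit def into ResultReg.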
305 if (II.getNumDefs() >= 1) {
306 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
307 ResultReg).addReg(Op0));
308 } else {
309 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
310 .addReg(Op0));
311 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
312 TII.get(TargetOpcode::COPY), ResultReg)
313 .addReg(II.implicit_defs()[0]));
314 }
315 return ResultReg;
316}
317
318unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
319 const TargetRegisterClass *RC,
320 unsigned Op0, unsigned Op1) {
321 Register ResultReg = createResultReg(RC);
322 const MCInstrDesc &II = TII.get(MachineInstOpcode);
323
324 // Make sure the input operands are sufficiently constrained to be legal
325 // for this instruction.
326 Op0 = constrainOperandRegClass(II, Op0, 1);
327 Op1 = constrainOperandRegClass(II, Op1, 2);
328
329 if (II.getNumDefs() >= 1) {
330 AddOptionalDefs(
331 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
332 .addReg(Op0)
333 .addReg(Op1));
334 } else {
335 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
336 .addReg(Op0)
337 .addReg(Op1));
338 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
339 TII.get(TargetOpcode::COPY), ResultReg)
340 .addReg(II.implicit_defs()[0]));
341 }
342 return ResultReg;
343}
344
345unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
346 const TargetRegisterClass *RC,
347 unsigned Op0, uint64_t Imm) {
348 Register ResultReg = createResultReg(RC);
349 const MCInstrDesc &II = TII.get(MachineInstOpcode);
350
351 // Make sure the input operand is sufficiently constrained to be legal
352 // for this instruction.
353 Op0 = constrainOperandRegClass(II, Op0, 1);
354 if (II.getNumDefs() >= 1) {
355 AddOptionalDefs(
356 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
357 .addReg(Op0)
358 .addImm(Imm));
359 } else {
360 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
361 .addReg(Op0)
362 .addImm(Imm));
363 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
364 TII.get(TargetOpcode::COPY), ResultReg)
365 .addReg(II.implicit_defs()[0]));
366 }
367 return ResultReg;
368}
369
370unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
371 const TargetRegisterClass *RC,
372 uint64_t Imm) {
373 Register ResultReg = createResultReg(RC);
374 const MCInstrDesc &II = TII.get(MachineInstOpcode);
375
376 if (II.getNumDefs() >= 1) {
377 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
378 ResultReg).addImm(Imm));
379 } else {
380 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
381 .addImm(Imm));
382 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
383 TII.get(TargetOpcode::COPY), ResultReg)
384 .addReg(II.implicit_defs()[0]));
385 }
386 return ResultReg;
387}
388
389// TODO: Don't worry about 64-bit now, but when this is fixed remove the
390// checks from the various callers.
391unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
392 if (VT == MVT::f64) return 0;
393
394 Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
395 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
396 TII.get(ARM::VMOVSR), MoveReg)
397 .addReg(SrcReg));
398 return MoveReg;
399}
400
401unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
402 if (VT == MVT::i64) return 0;
403
404 Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
405 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
406 TII.get(ARM::VMOVRS), MoveReg)
407 .addReg(SrcReg));
408 return MoveReg;
409}
410
411// For double width floating point we need to materialize two constants
412// (the high and the low) into integer registers then use a move to get
413// the combined constant into an FP reg.
414unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
415 const APFloat Val = CFP->getValueAPF();
416 bool is64bit = VT == MVT::f64;
417
418 // This checks to see if we can use VFP3 instructions to materialize
419 // a constant, otherwise we have to go through the constant pool.
420 if (TLI.isFPImmLegal(Val, VT)) {
421 int Imm;
422 unsigned Opc;
423 if (is64bit) {
424 Imm = ARM_AM::getFP64Imm(Val);
425 Opc = ARM::FCONSTD;
426 } else {
427 Imm = ARM_AM::getFP32Imm(Val);
428 Opc = ARM::FCONSTS;
429 }
430 Register DestReg = createResultReg(TLI.getRegClassFor(VT));
431 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
432 TII.get(Opc), DestReg).addImm(Imm));
433 return DestReg;
434 }
435
436 // Require VFP2 for loading fp constants.
437 if (!Subtarget->hasVFP2Base()) return false;
438
439 // MachineConstantPool wants an explicit alignment.
440 Align Alignment = DL.getPrefTypeAlign(CFP->getType());
441 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
442 Register DestReg = createResultReg(TLI.getRegClassFor(VT));
443 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
444
445 // The extra reg is for addrmode5.
446 AddOptionalDefs(
447 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)
 448 .addConstantPoolIndex(Idx)
 449 .addReg(0));
450 return DestReg;
451}
452
453unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
454 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
455 return 0;
456
457 // If we can do this in a single instruction without a constant pool entry
458 // do so now.
459 const ConstantInt *CI = cast<ConstantInt>(C);
460 if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
461 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
462 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
463 &ARM::GPRRegClass;
464 Register ImmReg = createResultReg(RC);
465 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
466 TII.get(Opc), ImmReg)
467 .addImm(CI->getZExtValue()));
468 return ImmReg;
469 }
470
471 // Use MVN to emit negative constants.
472 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
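 // MVN writes the bitwise NOT of its operand, so materialize ~C and let the
 // instruction invert it, provided the complement fits an ARM/Thumb2
 // modified-immediate encoding.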
473 unsigned Imm = (unsigned)~(CI->getSExtValue());
474 bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
475 (ARM_AM::getSOImmVal(Imm) != -1);
476 if (UseImm) {
477 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
478 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
479 &ARM::GPRRegClass;
480 Register ImmReg = createResultReg(RC);
481 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
482 TII.get(Opc), ImmReg)
483 .addImm(Imm));
484 return ImmReg;
485 }
486 }
487
488 unsigned ResultReg = 0;
489 if (Subtarget->useMovt())
490 ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
491
492 if (ResultReg)
493 return ResultReg;
494
495 // Load from constant pool. For now 32-bit only.
496 if (VT != MVT::i32)
497 return 0;
498
499 // MachineConstantPool wants an explicit alignment.
500 Align Alignment = DL.getPrefTypeAlign(C->getType());
501 unsigned Idx = MCP.getConstantPoolIndex(C, Alignment);
502 ResultReg = createResultReg(TLI.getRegClassFor(VT));
503 if (isThumb2)
504 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
505 TII.get(ARM::t2LDRpci), ResultReg)
 506 .addConstantPoolIndex(Idx));
 507 else {
508 // The extra immediate is for addrmode2.
509 ResultReg = constrainOperandRegClass(TII.get(ARM::LDRcp), ResultReg, 0);
510 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
511 TII.get(ARM::LDRcp), ResultReg)
 512 .addConstantPoolIndex(Idx)
 513 .addImm(0));
514 }
515 return ResultReg;
516}
517
518bool ARMFastISel::isPositionIndependent() const {
519 return TLI.isPositionIndependent();
520}
521
522unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
523 // For now 32-bit only.
524 if (VT != MVT::i32 || GV->isThreadLocal()) return 0;
525
526 // ROPI/RWPI not currently supported.
527 if (Subtarget->isROPI() || Subtarget->isRWPI())
528 return 0;
529
530 bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
531 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
532 : &ARM::GPRRegClass;
533 Register DestReg = createResultReg(RC);
534
535 // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
536 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
537 bool IsThreadLocal = GVar && GVar->isThreadLocal();
538 if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;
539
540 bool IsPositionIndependent = isPositionIndependent();
 542 // Use movw+movt when possible; it avoids constant pool entries.
542 // Non-darwin targets only support static movt relocations in FastISel.
543 if (Subtarget->useMovt() &&
544 (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
545 unsigned Opc;
546 unsigned char TF = 0;
547 if (Subtarget->isTargetMachO())
 548 TF = ARMII::MO_NONLAZY;
 549
550 if (IsPositionIndependent)
551 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
552 else
553 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
554 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
555 TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
556 } else {
557 // MachineConstantPool wants an explicit alignment.
558 Align Alignment = DL.getPrefTypeAlign(GV->getType());
559
560 if (Subtarget->isTargetELF() && IsPositionIndependent)
561 return ARMLowerPICELF(GV, VT);
562
563 // Grab index.
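 // PC reads as the instruction address plus 8 in ARM mode and plus 4 in
 // Thumb, so that delta is folded into the constant-pool entry for PIC.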
564 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
565 unsigned Id = AFI->createPICLabelUId();
 566 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
 567 ARMCP::CPValue,
 568 PCAdj);
569 unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment);
570
571 // Load value.
 572 MachineInstrBuilder MIB;
 573 if (isThumb2) {
574 unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
575 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
576 DestReg).addConstantPoolIndex(Idx);
577 if (IsPositionIndependent)
578 MIB.addImm(Id);
579 AddOptionalDefs(MIB);
580 } else {
581 // The extra immediate is for addrmode2.
582 DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
583 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
584 TII.get(ARM::LDRcp), DestReg)
 585 .addConstantPoolIndex(Idx)
 586 .addImm(0);
587 AddOptionalDefs(MIB);
588
589 if (IsPositionIndependent) {
590 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
591 Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
592
593 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
594 MIMD, TII.get(Opc), NewDestReg)
595 .addReg(DestReg)
596 .addImm(Id);
597 AddOptionalDefs(MIB);
598 return NewDestReg;
599 }
600 }
601 }
602
603 if ((Subtarget->isTargetELF() && Subtarget->isGVInGOT(GV)) ||
604 (Subtarget->isTargetMachO() && IsIndirect)) {
 605 MachineInstrBuilder MIB;
 606 Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
607 if (isThumb2)
608 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
609 TII.get(ARM::t2LDRi12), NewDestReg)
610 .addReg(DestReg)
611 .addImm(0);
612 else
613 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
614 TII.get(ARM::LDRi12), NewDestReg)
615 .addReg(DestReg)
616 .addImm(0);
617 DestReg = NewDestReg;
618 AddOptionalDefs(MIB);
619 }
620
621 return DestReg;
622}
623
624unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
625 EVT CEVT = TLI.getValueType(DL, C->getType(), true);
626
627 // Only handle simple types.
628 if (!CEVT.isSimple()) return 0;
629 MVT VT = CEVT.getSimpleVT();
630
631 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
632 return ARMMaterializeFP(CFP, VT);
633 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
634 return ARMMaterializeGV(GV, VT);
635 else if (isa<ConstantInt>(C))
636 return ARMMaterializeInt(C, VT);
637
638 return 0;
639}
640
641// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);
642
643unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
644 // Don't handle dynamic allocas.
645 if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
646
647 MVT VT;
648 if (!isLoadTypeLegal(AI->getType(), VT)) return 0;
649
 650 DenseMap<const AllocaInst*, int>::iterator SI =
 651 FuncInfo.StaticAllocaMap.find(AI);
652
653 // This will get lowered later into the correct offsets and registers
654 // via rewriteXFrameIndex.
655 if (SI != FuncInfo.StaticAllocaMap.end()) {
656 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
657 const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
658 Register ResultReg = createResultReg(RC);
659 ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);
660
661 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
662 TII.get(Opc), ResultReg)
663 .addFrameIndex(SI->second)
664 .addImm(0));
665 return ResultReg;
666 }
667
668 return 0;
669}
670
671bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
672 EVT evt = TLI.getValueType(DL, Ty, true);
673
674 // Only handle simple types.
675 if (evt == MVT::Other || !evt.isSimple()) return false;
676 VT = evt.getSimpleVT();
677
678 // Handle all legal types, i.e. a register that will directly hold this
679 // value.
680 return TLI.isTypeLegal(VT);
681}
682
683bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
684 if (isTypeLegal(Ty, VT)) return true;
685
 686 // If this is a type that can be sign or zero-extended to a basic operation
687 // go ahead and accept it now.
688 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
689 return true;
690
691 return false;
692}
693
694// Computes the address to get to an object.
695bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
696 // Some boilerplate from the X86 FastISel.
697 const User *U = nullptr;
698 unsigned Opcode = Instruction::UserOp1;
699 if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
700 // Don't walk into other basic blocks unless the object is an alloca from
701 // another block, otherwise it may not have a virtual register assigned.
702 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
703 FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
704 Opcode = I->getOpcode();
705 U = I;
706 }
707 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
708 Opcode = C->getOpcode();
709 U = C;
710 }
711
712 if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
713 if (Ty->getAddressSpace() > 255)
714 // Fast instruction selection doesn't support the special
715 // address spaces.
716 return false;
717
718 switch (Opcode) {
719 default:
720 break;
721 case Instruction::BitCast:
722 // Look through bitcasts.
723 return ARMComputeAddress(U->getOperand(0), Addr);
724 case Instruction::IntToPtr:
725 // Look past no-op inttoptrs.
726 if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
727 TLI.getPointerTy(DL))
728 return ARMComputeAddress(U->getOperand(0), Addr);
729 break;
730 case Instruction::PtrToInt:
731 // Look past no-op ptrtoints.
732 if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
733 return ARMComputeAddress(U->getOperand(0), Addr);
734 break;
735 case Instruction::GetElementPtr: {
736 Address SavedAddr = Addr;
737 int TmpOffset = Addr.Offset;
738
739 // Iterate through the GEP folding the constants into offsets where
740 // we can.
 741 gep_type_iterator GTI = gep_type_begin(U);
 742 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
743 i != e; ++i, ++GTI) {
744 const Value *Op = *i;
745 if (StructType *STy = GTI.getStructTypeOrNull()) {
746 const StructLayout *SL = DL.getStructLayout(STy);
747 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
748 TmpOffset += SL->getElementOffset(Idx);
749 } else {
 750 uint64_t S = GTI.getSequentialElementStride(DL);
 751 while (true) {
752 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
753 // Constant-offset addressing.
754 TmpOffset += CI->getSExtValue() * S;
755 break;
756 }
757 if (canFoldAddIntoGEP(U, Op)) {
758 // A compatible add with a constant operand. Fold the constant.
759 ConstantInt *CI =
760 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
761 TmpOffset += CI->getSExtValue() * S;
762 // Iterate on the other operand.
763 Op = cast<AddOperator>(Op)->getOperand(0);
764 continue;
765 }
766 // Unsupported
767 goto unsupported_gep;
768 }
769 }
770 }
771
772 // Try to grab the base operand now.
773 Addr.Offset = TmpOffset;
774 if (ARMComputeAddress(U->getOperand(0), Addr)) return true;
775
776 // We failed, restore everything and try the other options.
777 Addr = SavedAddr;
778
779 unsupported_gep:
780 break;
781 }
782 case Instruction::Alloca: {
783 const AllocaInst *AI = cast<AllocaInst>(Obj);
 784 DenseMap<const AllocaInst*, int>::iterator SI =
 785 FuncInfo.StaticAllocaMap.find(AI);
786 if (SI != FuncInfo.StaticAllocaMap.end()) {
787 Addr.BaseType = Address::FrameIndexBase;
788 Addr.Base.FI = SI->second;
789 return true;
790 }
791 break;
792 }
793 }
794
795 // Try to get this in a register if nothing else has worked.
796 if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
797 return Addr.Base.Reg != 0;
798}
799
800void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
801 bool needsLowering = false;
802 switch (VT.SimpleTy) {
803 default: llvm_unreachable("Unhandled load/store type!");
804 case MVT::i1:
805 case MVT::i8:
806 case MVT::i16:
807 case MVT::i32:
808 if (!useAM3) {
809 // Integer loads/stores handle 12-bit offsets.
810 needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
811 // Handle negative offsets.
812 if (needsLowering && isThumb2)
813 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
814 Addr.Offset > -256);
815 } else {
816 // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
817 needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
818 }
819 break;
820 case MVT::f32:
821 case MVT::f64:
822 // Floating point operands handle 8-bit offsets.
823 needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
824 break;
825 }
826
827 // If this is a stack pointer and the offset needs to be simplified then
828 // put the alloca address into a register, set the base type back to
829 // register and continue. This should almost never happen.
830 if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
831 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
832 : &ARM::GPRRegClass;
833 Register ResultReg = createResultReg(RC);
834 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
835 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
836 TII.get(Opc), ResultReg)
837 .addFrameIndex(Addr.Base.FI)
838 .addImm(0));
839 Addr.Base.Reg = ResultReg;
840 Addr.BaseType = Address::RegBase;
841 }
842
843 // Since the offset is too large for the load/store instruction
844 // get the reg+offset into a register.
845 if (needsLowering) {
846 Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
847 Addr.Offset, MVT::i32);
848 Addr.Offset = 0;
849 }
850}
851
852void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
853 const MachineInstrBuilder &MIB,
 854 MachineMemOperand::Flags Flags,
 855 bool useAM3) {
 856 // addrmode5 output depends on the selection DAG addressing dividing the
 857 // offset by 4 and multiplying it back later. Do the same division here.
858 if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
859 Addr.Offset /= 4;
860
861 // Frame base works a bit differently. Handle it separately.
862 if (Addr.BaseType == Address::FrameIndexBase) {
863 int FI = Addr.Base.FI;
864 int Offset = Addr.Offset;
865 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
866 MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags,
867 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
868 // Now add the rest of the operands.
869 MIB.addFrameIndex(FI);
870
871 // ARM halfword load/stores and signed byte loads need an additional
872 // operand.
873 if (useAM3) {
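 // addrmode3 immediates hold an 8-bit offset magnitude; bit 8 flags a
 // negative (subtract) offset, hence the 0x100 | -Offset encoding below.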
874 int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
875 MIB.addReg(0);
876 MIB.addImm(Imm);
877 } else {
878 MIB.addImm(Addr.Offset);
879 }
880 MIB.addMemOperand(MMO);
881 } else {
882 // Now add the rest of the operands.
883 MIB.addReg(Addr.Base.Reg);
884
885 // ARM halfword load/stores and signed byte loads need an additional
886 // operand.
887 if (useAM3) {
888 int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
889 MIB.addReg(0);
890 MIB.addImm(Imm);
891 } else {
892 MIB.addImm(Addr.Offset);
893 }
894 }
895 AddOptionalDefs(MIB);
896}
897
898bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
899 MaybeAlign Alignment, bool isZExt,
900 bool allocReg) {
901 unsigned Opc;
902 bool useAM3 = false;
903 bool needVMOV = false;
904 const TargetRegisterClass *RC;
905 switch (VT.SimpleTy) {
906 // This is mostly going to be Neon/vector support.
907 default: return false;
908 case MVT::i1:
909 case MVT::i8:
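 // Thumb2 has two immediate forms: a signed 8-bit offset for small negative
 // offsets and an unsigned 12-bit offset for everything else.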
910 if (isThumb2) {
911 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
912 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
913 else
914 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
915 } else {
916 if (isZExt) {
917 Opc = ARM::LDRBi12;
918 } else {
919 Opc = ARM::LDRSB;
920 useAM3 = true;
921 }
922 }
923 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
924 break;
925 case MVT::i16:
926 if (Alignment && *Alignment < Align(2) &&
927 !Subtarget->allowsUnalignedMem())
928 return false;
929
930 if (isThumb2) {
931 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
932 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
933 else
934 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
935 } else {
936 Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
937 useAM3 = true;
938 }
939 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
940 break;
941 case MVT::i32:
942 if (Alignment && *Alignment < Align(4) &&
943 !Subtarget->allowsUnalignedMem())
944 return false;
945
946 if (isThumb2) {
947 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
948 Opc = ARM::t2LDRi8;
949 else
950 Opc = ARM::t2LDRi12;
951 } else {
952 Opc = ARM::LDRi12;
953 }
954 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
955 break;
956 case MVT::f32:
957 if (!Subtarget->hasVFP2Base()) return false;
958 // Unaligned loads need special handling. Floats require word-alignment.
959 if (Alignment && *Alignment < Align(4)) {
960 needVMOV = true;
961 VT = MVT::i32;
962 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
963 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
964 } else {
965 Opc = ARM::VLDRS;
966 RC = TLI.getRegClassFor(VT);
967 }
968 break;
969 case MVT::f64:
970 // Can load and store double precision even without FeatureFP64
971 if (!Subtarget->hasVFP2Base()) return false;
972 // FIXME: Unaligned loads need special handling. Doublewords require
973 // word-alignment.
974 if (Alignment && *Alignment < Align(4))
975 return false;
976
977 Opc = ARM::VLDRD;
978 RC = TLI.getRegClassFor(VT);
979 break;
980 }
981 // Simplify this down to something we can handle.
982 ARMSimplifyAddress(Addr, VT, useAM3);
983
984 // Create the base instruction, then add the operands.
985 if (allocReg)
986 ResultReg = createResultReg(RC);
987 assert(ResultReg > 255 && "Expected an allocated virtual register.");
988 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
989 TII.get(Opc), ResultReg);
990 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
991
 992 // If we had an unaligned load of a float we've converted it to a regular
 993 // load. Now we must move from the GPR to the FP register.
994 if (needVMOV) {
995 Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
996 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
997 TII.get(ARM::VMOVSR), MoveReg)
998 .addReg(ResultReg));
999 ResultReg = MoveReg;
1000 }
1001 return true;
1002}
1003
1004bool ARMFastISel::SelectLoad(const Instruction *I) {
1005 // Atomic loads need special handling.
1006 if (cast<LoadInst>(I)->isAtomic())
1007 return false;
1008
1009 const Value *SV = I->getOperand(0);
1010 if (TLI.supportSwiftError()) {
1011 // Swifterror values can come from either a function parameter with
1012 // swifterror attribute or an alloca with swifterror attribute.
1013 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
1014 if (Arg->hasSwiftErrorAttr())
1015 return false;
1016 }
1017
1018 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
1019 if (Alloca->isSwiftError())
1020 return false;
1021 }
1022 }
1023
1024 // Verify we have a legal type before going any further.
1025 MVT VT;
1026 if (!isLoadTypeLegal(I->getType(), VT))
1027 return false;
1028
1029 // See if we can handle this address.
1030 Address Addr;
1031 if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
1032
1033 Register ResultReg;
1034 if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlign()))
1035 return false;
1036 updateValueMap(I, ResultReg);
1037 return true;
1038}
1039
1040bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
1041 MaybeAlign Alignment) {
1042 unsigned StrOpc;
1043 bool useAM3 = false;
1044 switch (VT.SimpleTy) {
1045 // This is mostly going to be Neon/vector support.
1046 default: return false;
1047 case MVT::i1: {
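 // Storing an i1: mask the source down to its low bit first, then fall
 // through and store it as an i8.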
1048 Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
1049 : &ARM::GPRRegClass);
1050 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
1051 SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
1052 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1053 TII.get(Opc), Res)
1054 .addReg(SrcReg).addImm(1));
1055 SrcReg = Res;
1056 [[fallthrough]];
1057 }
1058 case MVT::i8:
1059 if (isThumb2) {
1060 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1061 StrOpc = ARM::t2STRBi8;
1062 else
1063 StrOpc = ARM::t2STRBi12;
1064 } else {
1065 StrOpc = ARM::STRBi12;
1066 }
1067 break;
1068 case MVT::i16:
1069 if (Alignment && *Alignment < Align(2) &&
1070 !Subtarget->allowsUnalignedMem())
1071 return false;
1072
1073 if (isThumb2) {
1074 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1075 StrOpc = ARM::t2STRHi8;
1076 else
1077 StrOpc = ARM::t2STRHi12;
1078 } else {
1079 StrOpc = ARM::STRH;
1080 useAM3 = true;
1081 }
1082 break;
1083 case MVT::i32:
1084 if (Alignment && *Alignment < Align(4) &&
1085 !Subtarget->allowsUnalignedMem())
1086 return false;
1087
1088 if (isThumb2) {
1089 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1090 StrOpc = ARM::t2STRi8;
1091 else
1092 StrOpc = ARM::t2STRi12;
1093 } else {
1094 StrOpc = ARM::STRi12;
1095 }
1096 break;
1097 case MVT::f32:
1098 if (!Subtarget->hasVFP2Base()) return false;
1099 // Unaligned stores need special handling. Floats require word-alignment.
1100 if (Alignment && *Alignment < Align(4)) {
1101 Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
1102 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1103 TII.get(ARM::VMOVRS), MoveReg)
1104 .addReg(SrcReg));
1105 SrcReg = MoveReg;
1106 VT = MVT::i32;
1107 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
1108 } else {
1109 StrOpc = ARM::VSTRS;
1110 }
1111 break;
1112 case MVT::f64:
1113 // Can load and store double precision even without FeatureFP64
1114 if (!Subtarget->hasVFP2Base()) return false;
1115 // FIXME: Unaligned stores need special handling. Doublewords require
1116 // word-alignment.
1117 if (Alignment && *Alignment < Align(4))
1118 return false;
1119
1120 StrOpc = ARM::VSTRD;
1121 break;
1122 }
1123 // Simplify this down to something we can handle.
1124 ARMSimplifyAddress(Addr, VT, useAM3);
1125
1126 // Create the base instruction, then add the operands.
1127 SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
1128 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1129 TII.get(StrOpc))
1130 .addReg(SrcReg);
1131 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
1132 return true;
1133}
1134
1135bool ARMFastISel::SelectStore(const Instruction *I) {
1136 Value *Op0 = I->getOperand(0);
1137 unsigned SrcReg = 0;
1138
1139 // Atomic stores need special handling.
1140 if (cast<StoreInst>(I)->isAtomic())
1141 return false;
1142
1143 const Value *PtrV = I->getOperand(1);
1144 if (TLI.supportSwiftError()) {
1145 // Swifterror values can come from either a function parameter with
1146 // swifterror attribute or an alloca with swifterror attribute.
1147 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
1148 if (Arg->hasSwiftErrorAttr())
1149 return false;
1150 }
1151
1152 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
1153 if (Alloca->isSwiftError())
1154 return false;
1155 }
1156 }
1157
1158 // Verify we have a legal type before going any further.
1159 MVT VT;
1160 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
1161 return false;
1162
1163 // Get the value to be stored into a register.
1164 SrcReg = getRegForValue(Op0);
1165 if (SrcReg == 0) return false;
1166
1167 // See if we can handle this address.
1168 Address Addr;
1169 if (!ARMComputeAddress(I->getOperand(1), Addr))
1170 return false;
1171
1172 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlign()))
1173 return false;
1174 return true;
1175}
1176
 1177 static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
 1178 switch (Pred) {
1179 // Needs two compares...
1180 case CmpInst::FCMP_ONE:
1181 case CmpInst::FCMP_UEQ:
1182 default:
1183 // AL is our "false" for now. The other two need more compares.
1184 return ARMCC::AL;
1185 case CmpInst::ICMP_EQ:
1186 case CmpInst::FCMP_OEQ:
1187 return ARMCC::EQ;
1188 case CmpInst::ICMP_SGT:
1189 case CmpInst::FCMP_OGT:
1190 return ARMCC::GT;
1191 case CmpInst::ICMP_SGE:
1192 case CmpInst::FCMP_OGE:
1193 return ARMCC::GE;
1194 case CmpInst::ICMP_UGT:
1195 case CmpInst::FCMP_UGT:
1196 return ARMCC::HI;
1197 case CmpInst::FCMP_OLT:
1198 return ARMCC::MI;
1199 case CmpInst::ICMP_ULE:
1200 case CmpInst::FCMP_OLE:
1201 return ARMCC::LS;
1202 case CmpInst::FCMP_ORD:
1203 return ARMCC::VC;
1204 case CmpInst::FCMP_UNO:
1205 return ARMCC::VS;
1206 case CmpInst::FCMP_UGE:
1207 return ARMCC::PL;
1208 case CmpInst::ICMP_SLT:
1209 case CmpInst::FCMP_ULT:
1210 return ARMCC::LT;
1211 case CmpInst::ICMP_SLE:
1212 case CmpInst::FCMP_ULE:
1213 return ARMCC::LE;
1214 case CmpInst::FCMP_UNE:
1215 case CmpInst::ICMP_NE:
1216 return ARMCC::NE;
1217 case CmpInst::ICMP_UGE:
1218 return ARMCC::HS;
1219 case CmpInst::ICMP_ULT:
1220 return ARMCC::LO;
1221 }
1222}
1223
1224bool ARMFastISel::SelectBranch(const Instruction *I) {
1225 const BranchInst *BI = cast<BranchInst>(I);
1226 MachineBasicBlock *TBB = FuncInfo.getMBB(BI->getSuccessor(0));
1227 MachineBasicBlock *FBB = FuncInfo.getMBB(BI->getSuccessor(1));
1228
1229 // Simple branch support.
1230
1231 // If we can, avoid recomputing the compare - redoing it could lead to wonky
1232 // behavior.
1233 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
1234 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
1235 // Get the compare predicate.
1236 // Try to take advantage of fallthrough opportunities.
1237 CmpInst::Predicate Predicate = CI->getPredicate();
1238 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1239 std::swap(TBB, FBB);
 1240 Predicate = CmpInst::getInversePredicate(Predicate);
 1241 }
1242
1243 ARMCC::CondCodes ARMPred = getComparePred(Predicate);
1244
1245 // We may not handle every CC for now.
1246 if (ARMPred == ARMCC::AL) return false;
1247
1248 // Emit the compare.
1249 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
1250 return false;
1251
1252 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1253 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
1254 .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
1255 finishCondBranch(BI->getParent(), TBB, FBB);
1256 return true;
1257 }
1258 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1259 MVT SourceVT;
1260 if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
1261 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
1262 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1263 Register OpReg = getRegForValue(TI->getOperand(0));
1264 OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
1265 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1266 TII.get(TstOpc))
1267 .addReg(OpReg).addImm(1));
1268
1269 unsigned CCMode = ARMCC::NE;
1270 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1271 std::swap(TBB, FBB);
1272 CCMode = ARMCC::EQ;
1273 }
1274
1275 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1276 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
1277 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1278
1279 finishCondBranch(BI->getParent(), TBB, FBB);
1280 return true;
1281 }
1282 } else if (const ConstantInt *CI =
1283 dyn_cast<ConstantInt>(BI->getCondition())) {
1284 uint64_t Imm = CI->getZExtValue();
1285 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
1286 fastEmitBranch(Target, MIMD.getDL());
1287 return true;
1288 }
1289
1290 Register CmpReg = getRegForValue(BI->getCondition());
1291 if (CmpReg == 0) return false;
1292
1293 // We've been divorced from our compare! Our block was split, and
 1294 // now our compare lives in a predecessor block. We mustn't
1295 // re-compare here, as the children of the compare aren't guaranteed
1296 // live across the block boundary (we *could* check for this).
1297 // Regardless, the compare has been done in the predecessor block,
1298 // and it left a value for us in a virtual register. Ergo, we test
1299 // the one-bit value left in the virtual register.
1300 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1301 CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
1302 AddOptionalDefs(
1303 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))
1304 .addReg(CmpReg)
1305 .addImm(1));
1306
1307 unsigned CCMode = ARMCC::NE;
1308 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1309 std::swap(TBB, FBB);
1310 CCMode = ARMCC::EQ;
1311 }
1312
1313 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1314 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
1315 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
1316 finishCondBranch(BI->getParent(), TBB, FBB);
1317 return true;
1318}
1319
1320bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
1321 Register AddrReg = getRegForValue(I->getOperand(0));
1322 if (AddrReg == 0) return false;
1323
1324 unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
1325 assert(isThumb2 || Subtarget->hasV4TOps());
1326
1327 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1328 TII.get(Opc)).addReg(AddrReg));
1329
1330 const IndirectBrInst *IB = cast<IndirectBrInst>(I);
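 // Record every target of the indirectbr as a CFG successor so later passes
 // see all possible destinations.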
1331 for (const BasicBlock *SuccBB : IB->successors())
1332 FuncInfo.MBB->addSuccessor(FuncInfo.getMBB(SuccBB));
1333
1334 return true;
1335}
1336
1337bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
1338 bool isZExt) {
1339 Type *Ty = Src1Value->getType();
1340 EVT SrcEVT = TLI.getValueType(DL, Ty, true);
1341 if (!SrcEVT.isSimple()) return false;
1342 MVT SrcVT = SrcEVT.getSimpleVT();
1343
1344 if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())
1345 return false;
1346
1347 if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
1348 return false;
1349
1350 // Check to see if the 2nd operand is a constant that we can encode directly
1351 // in the compare.
1352 int Imm = 0;
1353 bool UseImm = false;
1354 bool isNegativeImm = false;
1355 // FIXME: At -O0 we don't have anything that canonicalizes operand order.
1356 // Thus, Src1Value may be a ConstantInt, but we're missing it.
1357 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
1358 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
1359 SrcVT == MVT::i1) {
1360 const APInt &CIVal = ConstInt->getValue();
1361 Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
1362 // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
 1363 // than a cmn, because there is no way to represent 2147483648 as a
1364 // signed 32-bit int.
1365 if (Imm < 0 && Imm != (int)0x80000000) {
1366 isNegativeImm = true;
1367 Imm = -Imm;
1368 }
1369 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1370 (ARM_AM::getSOImmVal(Imm) != -1);
1371 }
1372 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
1373 if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
1374 if (ConstFP->isZero() && !ConstFP->isNegative())
1375 UseImm = true;
1376 }
1377
1378 unsigned CmpOpc;
1379 bool isICmp = true;
1380 bool needsExt = false;
1381 switch (SrcVT.SimpleTy) {
1382 default: return false;
1383 // TODO: Verify compares.
1384 case MVT::f32:
1385 isICmp = false;
1386 CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
1387 break;
1388 case MVT::f64:
1389 isICmp = false;
1390 CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
1391 break;
1392 case MVT::i1:
1393 case MVT::i8:
1394 case MVT::i16:
1395 needsExt = true;
1396 [[fallthrough]];
1397 case MVT::i32:
1398 if (isThumb2) {
1399 if (!UseImm)
1400 CmpOpc = ARM::t2CMPrr;
1401 else
1402 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
1403 } else {
1404 if (!UseImm)
1405 CmpOpc = ARM::CMPrr;
1406 else
1407 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
1408 }
1409 break;
1410 }
1411
1412 Register SrcReg1 = getRegForValue(Src1Value);
1413 if (SrcReg1 == 0) return false;
1414
1415 unsigned SrcReg2 = 0;
1416 if (!UseImm) {
1417 SrcReg2 = getRegForValue(Src2Value);
1418 if (SrcReg2 == 0) return false;
1419 }
1420
1421 // We have i1, i8, or i16, we need to either zero extend or sign extend.
1422 if (needsExt) {
1423 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
1424 if (SrcReg1 == 0) return false;
1425 if (!UseImm) {
1426 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
1427 if (SrcReg2 == 0) return false;
1428 }
1429 }
1430
1431 const MCInstrDesc &II = TII.get(CmpOpc);
1432 SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
1433 if (!UseImm) {
1434 SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
1435 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
1436 .addReg(SrcReg1).addReg(SrcReg2));
1437 } else {
 1438 MachineInstrBuilder MIB;
 1439 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
1440 .addReg(SrcReg1);
1441
1442 // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
1443 if (isICmp)
1444 MIB.addImm(Imm);
1445 AddOptionalDefs(MIB);
1446 }
1447
1448 // For floating point we need to move the result to a comparison register
1449 // that we can then use for branches.
1450 if (Ty->isFloatTy() || Ty->isDoubleTy())
1451 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1452 TII.get(ARM::FMSTAT)));
1453 return true;
1454}
1455
1456bool ARMFastISel::SelectCmp(const Instruction *I) {
1457 const CmpInst *CI = cast<CmpInst>(I);
1458
1459 // Get the compare predicate.
 1460 ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
 1461
1462 // We may not handle every CC for now.
1463 if (ARMPred == ARMCC::AL) return false;
1464
1465 // Emit the compare.
1466 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
1467 return false;
1468
1469 // Now set a register based on the comparison. Explicitly set the predicates
1470 // here.
1471 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1472 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
1473 : &ARM::GPRRegClass;
1474 Register DestReg = createResultReg(RC);
1475 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
1476 unsigned ZeroReg = fastMaterializeConstant(Zero);
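 // Materialize 0, then conditionally overwrite it with 1 when ARMPred holds,
 // leaving the i1 result of the comparison in DestReg.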
1477 // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
1478 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc), DestReg)
1479 .addReg(ZeroReg).addImm(1)
1480 .addImm(ARMPred).addReg(ARM::CPSR);
1481
1482 updateValueMap(I, DestReg);
1483 return true;
1484}
1485
1486bool ARMFastISel::SelectFPExt(const Instruction *I) {
1487 // Make sure we have VFP and that we're extending float to double.
1488 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;
1489
1490 Value *V = I->getOperand(0);
1491 if (!I->getType()->isDoubleTy() ||
1492 !V->getType()->isFloatTy()) return false;
1493
1494 Register Op = getRegForValue(V);
1495 if (Op == 0) return false;
1496
1497 Register Result = createResultReg(&ARM::DPRRegClass);
1498 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1499 TII.get(ARM::VCVTDS), Result)
1500 .addReg(Op));
1501 updateValueMap(I, Result);
1502 return true;
1503}
1504
1505bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
1506 // Make sure we have VFP and that we're truncating double to float.
1507 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;
1508
1509 Value *V = I->getOperand(0);
1510 if (!(I->getType()->isFloatTy() &&
1511 V->getType()->isDoubleTy())) return false;
1512
1513 Register Op = getRegForValue(V);
1514 if (Op == 0) return false;
1515
1516 Register Result = createResultReg(&ARM::SPRRegClass);
1517 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1518 TII.get(ARM::VCVTSD), Result)
1519 .addReg(Op));
1520 updateValueMap(I, Result);
1521 return true;
1522}
1523
1524bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
1525 // Make sure we have VFP.
1526 if (!Subtarget->hasVFP2Base()) return false;
1527
1528 MVT DstVT;
1529 Type *Ty = I->getType();
1530 if (!isTypeLegal(Ty, DstVT))
1531 return false;
1532
1533 Value *Src = I->getOperand(0);
1534 EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);
1535 if (!SrcEVT.isSimple())
1536 return false;
1537 MVT SrcVT = SrcEVT.getSimpleVT();
1538 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1539 return false;
1540
1541 Register SrcReg = getRegForValue(Src);
1542 if (SrcReg == 0) return false;
1543
1544 // Handle sign-extension.
1545 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
1546 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
1547 /*isZExt*/!isSigned);
1548 if (SrcReg == 0) return false;
1549 }
1550
1551 // The conversion routine works on fp-reg to fp-reg and the operand above
1552 // was an integer, move it to the fp registers if possible.
1553 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
1554 if (FP == 0) return false;
1555
1556 unsigned Opc;
1557 if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
1558 else if (Ty->isDoubleTy() && Subtarget->hasFP64())
1559 Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
1560 else return false;
1561
1562 Register ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
1563 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1564 TII.get(Opc), ResultReg).addReg(FP));
1565 updateValueMap(I, ResultReg);
1566 return true;
1567}
1568
1569bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
1570 // Make sure we have VFP.
1571 if (!Subtarget->hasVFP2Base()) return false;
1572
1573 MVT DstVT;
1574 Type *RetTy = I->getType();
1575 if (!isTypeLegal(RetTy, DstVT))
1576 return false;
1577
1578 Register Op = getRegForValue(I->getOperand(0));
1579 if (Op == 0) return false;
1580
1581 unsigned Opc;
1582 Type *OpTy = I->getOperand(0)->getType();
1583 if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
1584 else if (OpTy->isDoubleTy() && Subtarget->hasFP64())
1585 Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
1586 else return false;
1587
1588 // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
1589 Register ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1590 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1591 TII.get(Opc), ResultReg).addReg(Op));
1592
1593 // This result needs to be in an integer register, but the conversion only
1594 // takes place in fp-regs.
1595 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
1596 if (IntReg == 0) return false;
1597
1598 updateValueMap(I, IntReg);
1599 return true;
1600}
1601
1602bool ARMFastISel::SelectSelect(const Instruction *I) {
1603 MVT VT;
1604 if (!isTypeLegal(I->getType(), VT))
1605 return false;
1606
1607 // Things need to be register sized for register moves.
1608 if (VT != MVT::i32) return false;
1609
1610 Register CondReg = getRegForValue(I->getOperand(0));
1611 if (CondReg == 0) return false;
1612 Register Op1Reg = getRegForValue(I->getOperand(1));
1613 if (Op1Reg == 0) return false;
1614
1615 // Check to see if we can use an immediate in the conditional move.
1616 int Imm = 0;
1617 bool UseImm = false;
1618 bool isNegativeImm = false;
1619 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
1620 assert(VT == MVT::i32 && "Expecting an i32.");
1621 Imm = (int)ConstInt->getValue().getZExtValue();
1622 if (Imm < 0) {
1623 isNegativeImm = true;
1624 Imm = ~Imm;
1625 }
1626 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
1627 (ARM_AM::getSOImmVal(Imm) != -1);
1628 }
1629
1630 unsigned Op2Reg = 0;
1631 if (!UseImm) {
1632 Op2Reg = getRegForValue(I->getOperand(2));
1633 if (Op2Reg == 0) return false;
1634 }
1635
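 // Test the low bit of the condition; the predicated move below then picks
 // the true or false value based on the resulting flags.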
1636 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1637 CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0);
1638 AddOptionalDefs(
1639 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))
1640 .addReg(CondReg)
1641 .addImm(1));
1642
1643 unsigned MovCCOpc;
1644 const TargetRegisterClass *RC;
1645 if (!UseImm) {
1646 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
1647 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1648 } else {
1649 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
1650 if (!isNegativeImm)
1651 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1652 else
1653 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
1654 }
1655 Register ResultReg = createResultReg(RC);
1656 if (!UseImm) {
1657 Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
1658 Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
1659 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),
1660 ResultReg)
1661 .addReg(Op2Reg)
1662 .addReg(Op1Reg)
 1663 .addImm(ARMCC::NE)
 1664 .addReg(ARM::CPSR);
1665 } else {
1666 Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
1667 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),
1668 ResultReg)
1669 .addReg(Op1Reg)
1670 .addImm(Imm)
 1671 .addImm(ARMCC::EQ)
 1672 .addReg(ARM::CPSR);
1673 }
1674 updateValueMap(I, ResultReg);
1675 return true;
1676}
1677
1678bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
1679 MVT VT;
1680 Type *Ty = I->getType();
1681 if (!isTypeLegal(Ty, VT))
1682 return false;
1683
1684 // If we have integer div support we should have selected this automagically.
1685 // In case we have a real miss go ahead and return false and we'll pick
1686 // it up later.
1687 if (Subtarget->hasDivideInThumbMode())
1688 return false;
1689
1690 // Otherwise emit a libcall.
1691 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1692 if (VT == MVT::i8)
1693 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
1694 else if (VT == MVT::i16)
1695 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
1696 else if (VT == MVT::i32)
1697 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
1698 else if (VT == MVT::i64)
1699 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1700 else if (VT == MVT::i128)
1701 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1702 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
1703
1704 return ARMEmitLibcall(I, LC);
1705}
1706
1707bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
1708 MVT VT;
1709 Type *Ty = I->getType();
1710 if (!isTypeLegal(Ty, VT))
1711 return false;
1712
1713 // Many ABIs do not provide a libcall for standalone remainder, so we need to
1714 // use divrem (see the RTABI 4.3.1). Since FastISel can't handle non-double
1715 // multi-reg returns, we'll have to bail out.
1716 if (!TLI.hasStandaloneRem(VT)) {
1717 return false;
1718 }
1719
1720 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1721 if (VT == MVT::i8)
1722 LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1723 else if (VT == MVT::i16)
1724 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1725 else if (VT == MVT::i32)
1726 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1727 else if (VT == MVT::i64)
1728 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1729 else if (VT == MVT::i128)
1730 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1731 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
1732
1733 return ARMEmitLibcall(I, LC);
1734}
1735
1736bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
1737 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1738
1739 // We can get here in the case when we have a binary operation on a non-legal
1740 // type and the target independent selector doesn't know how to handle it.
1741 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1742 return false;
1743
1744 unsigned Opc;
1745 switch (ISDOpcode) {
1746 default: return false;
1747 case ISD::ADD:
1748 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1749 break;
1750 case ISD::OR:
1751 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1752 break;
1753 case ISD::SUB:
1754 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1755 break;
1756 }
1757
1758 Register SrcReg1 = getRegForValue(I->getOperand(0));
1759 if (SrcReg1 == 0) return false;
1760
1761 // TODO: Often the 2nd operand is an immediate, which can be encoded directly
1762 // in the instruction, rather than materializing the value in a register.
1763 Register SrcReg2 = getRegForValue(I->getOperand(1));
1764 if (SrcReg2 == 0) return false;
1765
1766 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
1767 SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
1768 SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
1769 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1770 TII.get(Opc), ResultReg)
1771 .addReg(SrcReg1).addReg(SrcReg2));
1772 updateValueMap(I, ResultReg);
1773 return true;
1774}
1775
1776bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
1777 EVT FPVT = TLI.getValueType(DL, I->getType(), true);
1778 if (!FPVT.isSimple()) return false;
1779 MVT VT = FPVT.getSimpleVT();
1780
1781 // FIXME: Support vector types where possible.
1782 if (VT.isVector())
1783 return false;
1784
1785 // We can get here when we want to use NEON for our fp operations but
1786 // can't figure out how to do so. Just use the VFP instructions if we
1787 // have them.
1788 // FIXME: It'd be nice to use NEON instructions.
1789 Type *Ty = I->getType();
1790 if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())
1791 return false;
1792 if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
1793 return false;
1794
1795 unsigned Opc;
1796 bool is64bit = VT == MVT::f64 || VT == MVT::i64;
1797 switch (ISDOpcode) {
1798 default: return false;
1799 case ISD::FADD:
1800 Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1801 break;
1802 case ISD::FSUB:
1803 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1804 break;
1805 case ISD::FMUL:
1806 Opc = is64bit ? ARM::VMULD : ARM::VMULS;
1807 break;
1808 }
1809 Register Op1 = getRegForValue(I->getOperand(0));
1810 if (Op1 == 0) return false;
1811
1812 Register Op2 = getRegForValue(I->getOperand(1));
1813 if (Op2 == 0) return false;
1814
1815 Register ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
1816 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1817 TII.get(Opc), ResultReg)
1818 .addReg(Op1).addReg(Op2));
1819 updateValueMap(I, ResultReg);
1820 return true;
1821}
1822
1823// Call Handling Code
1824
1825// This is largely taken directly from CCAssignFnForNode
1826// TODO: We may not support all of this.
1827CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
1828 bool Return,
1829 bool isVarArg) {
1830 switch (CC) {
1831 default:
1832 report_fatal_error("Unsupported calling convention");
1833 case CallingConv::Fast:
1834 if (Subtarget->hasVFP2Base() && !isVarArg) {
1835 if (!Subtarget->isAAPCS_ABI())
1836 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1837 // For AAPCS ABI targets, just use VFP variant of the calling convention.
1838 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1839 }
1840 [[fallthrough]];
1841 case CallingConv::C:
1842 case CallingConv::CXX_FAST_TLS:
1843 // Use target triple & subtarget features to do actual dispatch.
1844 if (Subtarget->isAAPCS_ABI()) {
1845 if (Subtarget->hasFPRegs() &&
1846 TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
1847 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1848 else
1849 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1850 } else {
1851 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1852 }
1853 case CallingConv::ARM_AAPCS_VFP:
1854 case CallingConv::Swift:
1855 case CallingConv::SwiftTail:
1856 if (!isVarArg)
1857 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1858 // Fall through to the soft-float variant; variadic functions don't
1859 // use the hard floating point ABI.
1860 [[fallthrough]];
1861 case CallingConv::ARM_AAPCS:
1862 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1863 case CallingConv::ARM_APCS:
1864 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1865 case CallingConv::GHC:
1866 if (Return)
1867 report_fatal_error("Can't return in GHC call convention");
1868 else
1869 return CC_ARM_APCS_GHC;
1870 case CallingConv::CFGuard_Check:
1871 return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check);
1872 }
1873}
1874
1875bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
1876 SmallVectorImpl<Register> &ArgRegs,
1877 SmallVectorImpl<MVT> &ArgVTs,
1878 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
1879 SmallVectorImpl<Register> &RegArgs,
1880 CallingConv::ID CC,
1881 unsigned &NumBytes,
1882 bool isVarArg) {
1883 SmallVector<CCValAssign, 16> ArgLocs;
1884 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
1885 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
1886 CCAssignFnForCall(CC, false, isVarArg));
1887
1888 // Check that we can handle all of the arguments. If we can't, then bail out
1889 // now before we add code to the MBB.
1890 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1891 CCValAssign &VA = ArgLocs[i];
1892 MVT ArgVT = ArgVTs[VA.getValNo()];
1893
1894 // We don't handle NEON/vector parameters yet.
1895 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
1896 return false;
1897
1898 // Now copy/store arg to correct locations.
1899 if (VA.isRegLoc() && !VA.needsCustom()) {
1900 continue;
1901 } else if (VA.needsCustom()) {
1902 // TODO: We need custom lowering for vector (v2f64) args.
1903 if (VA.getLocVT() != MVT::f64 ||
1904 // TODO: Only handle register args for now.
1905 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
1906 return false;
1907 } else {
1908 switch (ArgVT.SimpleTy) {
1909 default:
1910 return false;
1911 case MVT::i1:
1912 case MVT::i8:
1913 case MVT::i16:
1914 case MVT::i32:
1915 break;
1916 case MVT::f32:
1917 if (!Subtarget->hasVFP2Base())
1918 return false;
1919 break;
1920 case MVT::f64:
1921 if (!Subtarget->hasVFP2Base())
1922 return false;
1923 break;
1924 }
1925 }
1926 }
1927
1928 // At this point, we are able to handle the call's arguments in fast isel.
1929
1930 // Get a count of how many bytes are to be pushed on the stack.
1931 NumBytes = CCInfo.getStackSize();
1932
1933 // Issue CALLSEQ_START
1934 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
1935 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1936 TII.get(AdjStackDown))
1937 .addImm(NumBytes).addImm(0));
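// This frame-setup pseudo and the matching frame-destroy pseudo emitted in
// FinishCall bracket the call; NumBytes is the stack space needed for the
// outgoing arguments and becomes an SP adjustment when frames are lowered.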
1938
1939 // Process the args.
1940 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1941 CCValAssign &VA = ArgLocs[i];
1942 const Value *ArgVal = Args[VA.getValNo()];
1943 Register Arg = ArgRegs[VA.getValNo()];
1944 MVT ArgVT = ArgVTs[VA.getValNo()];
1945
1946 assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
1947 "We don't handle NEON/vector parameters yet.");
1948
1949 // Handle arg promotion, etc.
1950 switch (VA.getLocInfo()) {
1951 case CCValAssign::Full: break;
1952 case CCValAssign::SExt: {
1953 MVT DestVT = VA.getLocVT();
1954 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
1955 assert(Arg != 0 && "Failed to emit a sext");
1956 ArgVT = DestVT;
1957 break;
1958 }
1959 case CCValAssign::AExt:
1960 // Intentional fall-through. Handle AExt and ZExt.
1961 case CCValAssign::ZExt: {
1962 MVT DestVT = VA.getLocVT();
1963 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
1964 assert(Arg != 0 && "Failed to emit a zext");
1965 ArgVT = DestVT;
1966 break;
1967 }
1968 case CCValAssign::BCvt: {
1969 unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg);
1970 assert(BC != 0 && "Failed to emit a bitcast!");
1971 Arg = BC;
1972 ArgVT = VA.getLocVT();
1973 break;
1974 }
1975 default: llvm_unreachable("Unknown arg promotion!");
1976 }
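// After promotion, every integer argument narrower than i32 has been widened
// to its location type (normally i32), so the copies and stores below can
// treat all GPR arguments uniformly.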
1977
1978 // Now copy/store arg to correct locations.
1979 if (VA.isRegLoc() && !VA.needsCustom()) {
1980 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1981 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
1982 RegArgs.push_back(VA.getLocReg());
1983 } else if (VA.needsCustom()) {
1984 // TODO: We need custom lowering for vector (v2f64) args.
1985 assert(VA.getLocVT() == MVT::f64 &&
1986 "Custom lowering for v2f64 args not available");
1987
1988 // FIXME: ArgLocs[++i] may extend beyond ArgLocs.size()
1989 CCValAssign &NextVA = ArgLocs[++i];
1990
1991 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
1992 "We only handle register args!");
1993
1994 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1995 TII.get(ARM::VMOVRRD), VA.getLocReg())
1996 .addReg(NextVA.getLocReg(), RegState::Define)
1997 .addReg(Arg));
1998 RegArgs.push_back(VA.getLocReg());
1999 RegArgs.push_back(NextVA.getLocReg());
2000 } else {
2001 assert(VA.isMemLoc());
2002 // Need to store on the stack.
2003
2004 // Don't emit stores for undef values.
2005 if (isa<UndefValue>(ArgVal))
2006 continue;
2007
2008 Address Addr;
2009 Addr.BaseType = Address::RegBase;
2010 Addr.Base.Reg = ARM::SP;
2011 Addr.Offset = VA.getLocMemOffset();
2012
2013 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
2014 assert(EmitRet && "Could not emit a store for argument!");
2015 }
2016 }
2017
2018 return true;
2019}
2020
2021bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
2022 const Instruction *I, CallingConv::ID CC,
2023 unsigned &NumBytes, bool isVarArg) {
2024 // Issue CALLSEQ_END
2025 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
2026 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2027 TII.get(AdjStackUp))
2028 .addImm(NumBytes).addImm(-1ULL));
2029
2030 // Now the return value.
2031 if (RetVT != MVT::isVoid) {
2032 SmallVector<CCValAssign, 16> RVLocs;
2033 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
2034 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
2035
2036 // Copy all of the result registers out of their specified physreg.
2037 if (RVLocs.size() == 2 && RetVT == MVT::f64) {
2038 // For this move we copy into two registers and then move into the
2039 // double fp reg we want.
2040 MVT DestVT = RVLocs[0].getValVT();
2041 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
2042 Register ResultReg = createResultReg(DstRC);
2043 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2044 TII.get(ARM::VMOVDRR), ResultReg)
2045 .addReg(RVLocs[0].getLocReg())
2046 .addReg(RVLocs[1].getLocReg()));
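// Under the soft-float return convention an f64 comes back split across two
// GPRs (e.g. r0/r1); VMOVDRR reassembles it into a single D register.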
2047
2048 UsedRegs.push_back(RVLocs[0].getLocReg());
2049 UsedRegs.push_back(RVLocs[1].getLocReg());
2050
2051 // Finally update the result.
2052 updateValueMap(I, ResultReg);
2053 } else {
2054 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
2055 MVT CopyVT = RVLocs[0].getValVT();
2056
2057 // Special handling for extended integers.
2058 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
2059 CopyVT = MVT::i32;
2060
2061 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
2062
2063 Register ResultReg = createResultReg(DstRC);
2064 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2065 TII.get(TargetOpcode::COPY),
2066 ResultReg).addReg(RVLocs[0].getLocReg());
2067 UsedRegs.push_back(RVLocs[0].getLocReg());
2068
2069 // Finally update the result.
2070 updateValueMap(I, ResultReg);
2071 }
2072 }
2073
2074 return true;
2075}
2076
2077bool ARMFastISel::SelectRet(const Instruction *I) {
2078 const ReturnInst *Ret = cast<ReturnInst>(I);
2079 const Function &F = *I->getParent()->getParent();
2080 const bool IsCmseNSEntry = F.hasFnAttribute("cmse_nonsecure_entry");
2081
2082 if (!FuncInfo.CanLowerReturn)
2083 return false;
2084
2085 if (TLI.supportSwiftError() &&
2086 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
2087 return false;
2088
2089 if (TLI.supportSplitCSR(FuncInfo.MF))
2090 return false;
2091
2092 // Build a list of return value registers.
2093 SmallVector<Register, 4> RetRegs;
2094
2095 CallingConv::ID CC = F.getCallingConv();
2096 if (Ret->getNumOperands() > 0) {
2097 SmallVector<ISD::OutputArg, 4> Outs;
2098 GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
2099
2100 // Analyze operands of the call, assigning locations to each operand.
2101 SmallVector<CCValAssign, 16> ValLocs;
2102 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
2103 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
2104 F.isVarArg()));
2105
2106 const Value *RV = Ret->getOperand(0);
2107 Register Reg = getRegForValue(RV);
2108 if (Reg == 0)
2109 return false;
2110
2111 // Only handle a single return value for now.
2112 if (ValLocs.size() != 1)
2113 return false;
2114
2115 CCValAssign &VA = ValLocs[0];
2116
2117 // Don't bother handling odd stuff for now.
2118 if (VA.getLocInfo() != CCValAssign::Full)
2119 return false;
2120 // Only handle register returns for now.
2121 if (!VA.isRegLoc())
2122 return false;
2123
2124 unsigned SrcReg = Reg + VA.getValNo();
2125 EVT RVEVT = TLI.getValueType(DL, RV->getType());
2126 if (!RVEVT.isSimple()) return false;
2127 MVT RVVT = RVEVT.getSimpleVT();
2128 MVT DestVT = VA.getValVT();
2129 // Special handling for extended integers.
2130 if (RVVT != DestVT) {
2131 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
2132 return false;
2133
2134 assert(DestVT == MVT::i32 && "ARM should always ext to i32");
2135
2136 // Perform extension if flagged as either zext or sext. Otherwise, do
2137 // nothing.
2138 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
2139 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
2140 if (SrcReg == 0) return false;
2141 }
2142 }
2143
2144 // Make the copy.
2145 Register DstReg = VA.getLocReg();
2146 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
2147 // Avoid a cross-class copy. This is very unlikely.
2148 if (!SrcRC->contains(DstReg))
2149 return false;
2150 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2151 TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
2152
2153 // Add register to return instruction.
2154 RetRegs.push_back(VA.getLocReg());
2155 }
2156
2157 unsigned RetOpc;
2158 if (IsCmseNSEntry)
2159 if (isThumb2)
2160 RetOpc = ARM::tBXNS_RET;
2161 else
2162 llvm_unreachable("CMSE not valid for non-Thumb targets");
2163 else
2164 RetOpc = Subtarget->getReturnOpcode();
2165
2166 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2167 TII.get(RetOpc));
2168 AddOptionalDefs(MIB);
2169 for (unsigned R : RetRegs)
2170 MIB.addReg(R, RegState::Implicit);
2171 return true;
2172}
2173
2174unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
2175 if (UseReg)
2176 return isThumb2 ? gettBLXrOpcode(*MF) : getBLXOpcode(*MF);
2177 else
2178 return isThumb2 ? ARM::tBL : ARM::BL;
2179}
2180
2181unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
2182 // Manually compute the global's type to avoid building it when unnecessary.
2183 Type *GVTy = PointerType::get(*Context, /*AS=*/0);
2184 EVT LCREVT = TLI.getValueType(DL, GVTy);
2185 if (!LCREVT.isSimple()) return 0;
2186
2187 GlobalValue *GV = M.getNamedGlobal(Name.str());
2188 if (!GV)
2189 GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false,
2190 GlobalValue::ExternalLinkage, nullptr, Name);
2191
2192 return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
2193}
2194
2195// A quick function that will emit a call for a named libcall in F with the
2196// vector of passed arguments for the Instruction in I. We can assume that we
2197// can emit a call for any libcall we can produce. This is an abridged version
2198// of the full call infrastructure since we won't need to worry about things
2199// like computed function pointers or strange arguments at call sites.
2200// TODO: Try to unify this and the normal call bits for ARM, then try to unify
2201// with X86.
2202bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
2203 CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
2204
2205 // Handle *simple* calls for now.
2206 Type *RetTy = I->getType();
2207 MVT RetVT;
2208 if (RetTy->isVoidTy())
2209 RetVT = MVT::isVoid;
2210 else if (!isTypeLegal(RetTy, RetVT))
2211 return false;
2212
2213 // Can't handle non-double multi-reg retvals.
2214 if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
2215 SmallVector<CCValAssign, 16> RVLocs;
2216 CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
2217 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
2218 if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2219 return false;
2220 }
2221
2222 // Set up the argument vectors.
2223 SmallVector<Value*, 8> Args;
2224 SmallVector<Register, 8> ArgRegs;
2225 SmallVector<MVT, 8> ArgVTs;
2226 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2227 Args.reserve(I->getNumOperands());
2228 ArgRegs.reserve(I->getNumOperands());
2229 ArgVTs.reserve(I->getNumOperands());
2230 ArgFlags.reserve(I->getNumOperands());
2231 for (Value *Op : I->operands()) {
2232 Register Arg = getRegForValue(Op);
2233 if (Arg == 0) return false;
2234
2235 Type *ArgTy = Op->getType();
2236 MVT ArgVT;
2237 if (!isTypeLegal(ArgTy, ArgVT)) return false;
2238
2239 ISD::ArgFlagsTy Flags;
2240 Flags.setOrigAlign(DL.getABITypeAlign(ArgTy));
2241
2242 Args.push_back(Op);
2243 ArgRegs.push_back(Arg);
2244 ArgVTs.push_back(ArgVT);
2245 ArgFlags.push_back(Flags);
2246 }
2247
2248 // Handle the arguments now that we've gotten them.
2249 SmallVector<Register, 4> RegArgs;
2250 unsigned NumBytes;
2251 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2252 RegArgs, CC, NumBytes, false))
2253 return false;
2254
2255 Register CalleeReg;
2256 if (Subtarget->genLongCalls()) {
2257 CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
2258 if (CalleeReg == 0) return false;
2259 }
2260
2261 // Issue the call.
2262 unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
2263 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2264 MIMD, TII.get(CallOpc));
2265 // BL / BLX don't take a predicate, but tBL / tBLX do.
2266 if (isThumb2)
2267 MIB.add(predOps(ARMCC::AL));
2268 if (Subtarget->genLongCalls()) {
2269 CalleeReg =
2270 constrainOperandRegClass(TII.get(CallOpc), CalleeReg, isThumb2 ? 2 : 0);
2271 MIB.addReg(CalleeReg);
2272 } else
2273 MIB.addExternalSymbol(TLI.getLibcallName(Call));
2274
2275 // Add implicit physical register uses to the call.
2276 for (Register R : RegArgs)
2277 MIB.addReg(R, RegState::Implicit);
2278
2279 // Add a register mask with the call-preserved registers.
2280 // Proper defs for return values will be added by setPhysRegsDeadExcept().
2281 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
2282
2283 // Finish off the call including any return values.
2284 SmallVector<Register, 4> UsedRegs;
2285 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;
2286
2287 // Set all unused physreg defs as dead.
2288 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2289
2290 return true;
2291}
2292
2293bool ARMFastISel::SelectCall(const Instruction *I,
2294 const char *IntrMemName = nullptr) {
2295 const CallInst *CI = cast<CallInst>(I);
2296 const Value *Callee = CI->getCalledOperand();
2297
2298 // Can't handle inline asm.
2299 if (isa<InlineAsm>(Callee)) return false;
2300
2301 // Allow SelectionDAG isel to handle tail calls.
2302 if (CI->isTailCall()) return false;
2303
2304 // Check the calling convention.
2305 CallingConv::ID CC = CI->getCallingConv();
2306
2307 // TODO: Avoid some calling conventions?
2308
2309 FunctionType *FTy = CI->getFunctionType();
2310 bool isVarArg = FTy->isVarArg();
2311
2312 // Handle *simple* calls for now.
2313 Type *RetTy = I->getType();
2314 MVT RetVT;
2315 if (RetTy->isVoidTy())
2316 RetVT = MVT::isVoid;
2317 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
2318 RetVT != MVT::i8 && RetVT != MVT::i1)
2319 return false;
2320
2321 // Can't handle non-double multi-reg retvals.
2322 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
2323 RetVT != MVT::i16 && RetVT != MVT::i32) {
2324 SmallVector<CCValAssign, 16> RVLocs;
2325 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
2326 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
2327 if (RVLocs.size() >= 2 && RetVT != MVT::f64)
2328 return false;
2329 }
2330
2331 // Set up the argument vectors.
2332 SmallVector<Value*, 8> Args;
2333 SmallVector<Register, 8> ArgRegs;
2334 SmallVector<MVT, 8> ArgVTs;
2335 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
2336 unsigned arg_size = CI->arg_size();
2337 Args.reserve(arg_size);
2338 ArgRegs.reserve(arg_size);
2339 ArgVTs.reserve(arg_size);
2340 ArgFlags.reserve(arg_size);
2341 for (auto ArgI = CI->arg_begin(), ArgE = CI->arg_end(); ArgI != ArgE; ++ArgI) {
2342 // If we're lowering a memory intrinsic instead of a regular call, skip the
2343 // last argument, which shouldn't be passed to the underlying function.
2344 if (IntrMemName && ArgE - ArgI <= 1)
2345 break;
2346
2347 ISD::ArgFlagsTy Flags;
2348 unsigned ArgIdx = ArgI - CI->arg_begin();
2349 if (CI->paramHasAttr(ArgIdx, Attribute::SExt))
2350 Flags.setSExt();
2351 if (CI->paramHasAttr(ArgIdx, Attribute::ZExt))
2352 Flags.setZExt();
2353
2354 // FIXME: Only handle *easy* calls for now.
2355 if (CI->paramHasAttr(ArgIdx, Attribute::InReg) ||
2356 CI->paramHasAttr(ArgIdx, Attribute::StructRet) ||
2357 CI->paramHasAttr(ArgIdx, Attribute::SwiftSelf) ||
2358 CI->paramHasAttr(ArgIdx, Attribute::SwiftError) ||
2359 CI->paramHasAttr(ArgIdx, Attribute::Nest) ||
2360 CI->paramHasAttr(ArgIdx, Attribute::ByVal))
2361 return false;
2362
2363 Type *ArgTy = (*ArgI)->getType();
2364 MVT ArgVT;
2365 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
2366 ArgVT != MVT::i1)
2367 return false;
2368
2369 Register Arg = getRegForValue(*ArgI);
2370 if (!Arg.isValid())
2371 return false;
2372
2373 Flags.setOrigAlign(DL.getABITypeAlign(ArgTy));
2374
2375 Args.push_back(*ArgI);
2376 ArgRegs.push_back(Arg);
2377 ArgVTs.push_back(ArgVT);
2378 ArgFlags.push_back(Flags);
2379 }
2380
2381 // Handle the arguments now that we've gotten them.
2382 SmallVector<Register, 4> RegArgs;
2383 unsigned NumBytes;
2384 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2385 RegArgs, CC, NumBytes, isVarArg))
2386 return false;
2387
2388 bool UseReg = false;
2389 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
2390 if (!GV || Subtarget->genLongCalls()) UseReg = true;
2391
2392 Register CalleeReg;
2393 if (UseReg) {
2394 if (IntrMemName)
2395 CalleeReg = getLibcallReg(IntrMemName);
2396 else
2397 CalleeReg = getRegForValue(Callee);
2398
2399 if (CalleeReg == 0) return false;
2400 }
2401
2402 // Issue the call.
2403 unsigned CallOpc = ARMSelectCallOp(UseReg);
2404 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2405 MIMD, TII.get(CallOpc));
2406
2407 // ARM calls don't take a predicate, but tBL / tBLX do.
2408 if (isThumb2)
2409 MIB.add(predOps(ARMCC::AL));
2410 if (UseReg) {
2411 CalleeReg =
2412 constrainOperandRegClass(TII.get(CallOpc), CalleeReg, isThumb2 ? 2 : 0);
2413 MIB.addReg(CalleeReg);
2414 } else if (!IntrMemName)
2415 MIB.addGlobalAddress(GV, 0, 0);
2416 else
2417 MIB.addExternalSymbol(IntrMemName, 0);
2418
2419 // Add implicit physical register uses to the call.
2420 for (Register R : RegArgs)
2421 MIB.addReg(R, RegState::Implicit);
2422
2423 // Add a register mask with the call-preserved registers.
2424 // Proper defs for return values will be added by setPhysRegsDeadExcept().
2425 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
2426
2427 // Finish off the call including any return values.
2428 SmallVector<Register, 4> UsedRegs;
2429 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
2430 return false;
2431
2432 // Set all unused physreg defs as dead.
2433 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2434
2435 return true;
2436}
2437
2438bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
2439 return Len <= 16;
2440}
2441
2442bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
2443 MaybeAlign Alignment) {
2444 // Make sure we don't bloat code by inlining very large memcpy's.
2445 if (!ARMIsMemCpySmall(Len))
2446 return false;
2447
2448 while (Len) {
2449 MVT VT;
2450 if (!Alignment || *Alignment >= 4) {
2451 if (Len >= 4)
2452 VT = MVT::i32;
2453 else if (Len >= 2)
2454 VT = MVT::i16;
2455 else {
2456 assert(Len == 1 && "Expected a length of 1!");
2457 VT = MVT::i8;
2458 }
2459 } else {
2460 assert(Alignment && "Alignment is set in this branch");
2461 // Bound based on alignment.
2462 if (Len >= 2 && *Alignment == 2)
2463 VT = MVT::i16;
2464 else {
2465 VT = MVT::i8;
2466 }
2467 }
2468
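// Example: an 11-byte copy with 4-byte (or unspecified) alignment expands to
// i32 + i32 + i16 + i8 load/store pairs; with 2-byte alignment the same copy
// uses five i16 chunks followed by a final i8.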
2469 bool RV;
2470 Register ResultReg;
2471 RV = ARMEmitLoad(VT, ResultReg, Src);
2472 assert(RV && "Should be able to handle this load.");
2473 RV = ARMEmitStore(VT, ResultReg, Dest);
2474 assert(RV && "Should be able to handle this store.");
2475 (void)RV;
2476
2477 unsigned Size = VT.getSizeInBits()/8;
2478 Len -= Size;
2479 Dest.Offset += Size;
2480 Src.Offset += Size;
2481 }
2482
2483 return true;
2484}
2485
2486bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
2487 // FIXME: Handle more intrinsics.
2488 switch (I.getIntrinsicID()) {
2489 default: return false;
2490 case Intrinsic::frameaddress: {
2491 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
2492 MFI.setFrameAddressIsTaken(true);
2493
2494 unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
2495 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
2496 : &ARM::GPRRegClass;
2497
2498 const ARMBaseRegisterInfo *RegInfo =
2499 static_cast<const ARMBaseRegisterInfo *>(Subtarget->getRegisterInfo());
2500 Register FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
2501 unsigned SrcReg = FramePtr;
2502
2503 // Recursively load frame address
2504 // ldr r0 [fp]
2505 // ldr r0 [r0]
2506 // ldr r0 [r0]
2507 // ...
2508 unsigned DestReg;
2509 unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
2510 while (Depth--) {
2511 DestReg = createResultReg(RC);
2512 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2513 TII.get(LdrOpc), DestReg)
2514 .addReg(SrcReg).addImm(0));
2515 SrcReg = DestReg;
2516 }
2517 updateValueMap(&I, SrcReg);
2518 return true;
2519 }
2520 case Intrinsic::memcpy:
2521 case Intrinsic::memmove: {
2522 const MemTransferInst &MTI = cast<MemTransferInst>(I);
2523 // Don't handle volatile.
2524 if (MTI.isVolatile())
2525 return false;
2526
2527 // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
2528 // we would emit dead code because we don't currently handle memmoves.
2529 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
2530 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
2531 // Small memcpy's are common enough that we want to do them without a call
2532 // if possible.
2533 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
2534 if (ARMIsMemCpySmall(Len)) {
2535 Address Dest, Src;
2536 if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
2537 !ARMComputeAddress(MTI.getRawSource(), Src))
2538 return false;
2539 MaybeAlign Alignment;
2540 if (MTI.getDestAlign() || MTI.getSourceAlign())
2541 Alignment = std::min(MTI.getDestAlign().valueOrOne(),
2542 MTI.getSourceAlign().valueOrOne());
2543 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
2544 return true;
2545 }
2546 }
2547
2548 if (!MTI.getLength()->getType()->isIntegerTy(32))
2549 return false;
2550
2551 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
2552 return false;
2553
2554 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
2555 return SelectCall(&I, IntrMemName);
2556 }
2557 case Intrinsic::memset: {
2558 const MemSetInst &MSI = cast<MemSetInst>(I);
2559 // Don't handle volatile.
2560 if (MSI.isVolatile())
2561 return false;
2562
2563 if (!MSI.getLength()->getType()->isIntegerTy(32))
2564 return false;
2565
2566 if (MSI.getDestAddressSpace() > 255)
2567 return false;
2568
2569 return SelectCall(&I, "memset");
2570 }
2571 case Intrinsic::trap: {
2572 unsigned Opcode;
2573 if (Subtarget->isThumb())
2574 Opcode = ARM::tTRAP;
2575 else
2576 Opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
2577 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode));
2578 return true;
2579 }
2580 }
2581}
2582
2583bool ARMFastISel::SelectTrunc(const Instruction *I) {
2584 // The high bits for a type smaller than the register size are assumed to be
2585 // undefined.
2586 Value *Op = I->getOperand(0);
2587
2588 EVT SrcVT, DestVT;
2589 SrcVT = TLI.getValueType(DL, Op->getType(), true);
2590 DestVT = TLI.getValueType(DL, I->getType(), true);
2591
2592 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
2593 return false;
2594 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
2595 return false;
2596
2597 Register SrcReg = getRegForValue(Op);
2598 if (!SrcReg) return false;
2599
2600 // Because the high bits are undefined, a truncate doesn't generate
2601 // any code.
2602 updateValueMap(I, SrcReg);
2603 return true;
2604}
2605
2606unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
2607 bool isZExt) {
2608 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
2609 return 0;
2610 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
2611 return 0;
2612
2613 // Table of which combinations can be emitted as a single instruction,
2614 // and which will require two.
2615 static const uint8_t isSingleInstrTbl[3][2][2][2] = {
2616 // ARM Thumb
2617 // !hasV6Ops hasV6Ops !hasV6Ops hasV6Ops
2618 // ext: s z s z s z s z
2619 /* 1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
2620 /* 8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
2621 /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
2622 };
2623
2624 // Target register constraints:
2625 // - ARM: can never be PC.
2626 // - 16-bit Thumb: restricted to the lower 8 registers.
2627 // - 32-bit Thumb: restricted to non-SP and non-PC.
2628 static const TargetRegisterClass *RCTbl[2][2] = {
2629 // Instructions: Two Single
2630 /* ARM */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
2631 /* Thumb */ { &ARM::tGPRRegClass, &ARM::rGPRRegClass }
2632 };
2633
2634 // Table governing the instruction(s) to be emitted.
2635 static const struct InstructionTable {
2636 uint32_t Opc : 16;
2637 uint32_t hasS : 1; // Some instructions have an S bit, always set it to 0.
2638 uint32_t Shift : 7; // For shift operand addressing mode, used by MOVsi.
2639 uint32_t Imm : 8; // All instructions have either a shift or a mask.
2640 } IT[2][2][3][2] = {
2641 { // Two instructions (first is left shift, second is in this table).
2642 { // ARM Opc S Shift Imm
2643 /* 1 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 31 },
2644 /* 1 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 31 } },
2645 /* 8 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 24 },
2646 /* 8 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 24 } },
2647 /* 16 bit sext */ { { ARM::MOVsi , 1, ARM_AM::asr , 16 },
2648 /* 16 bit zext */ { ARM::MOVsi , 1, ARM_AM::lsr , 16 } }
2649 },
2650 { // Thumb Opc S Shift Imm
2651 /* 1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 31 },
2652 /* 1 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } },
2653 /* 8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 24 },
2654 /* 8 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } },
2655 /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift, 16 },
2656 /* 16 bit zext */ { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } }
2657 }
2658 },
2659 { // Single instruction.
2660 { // ARM Opc S Shift Imm
2661 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 },
2662 /* 1 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 1 } },
2663 /* 8 bit sext */ { { ARM::SXTB , 0, ARM_AM::no_shift, 0 },
2664 /* 8 bit zext */ { ARM::ANDri , 1, ARM_AM::no_shift, 255 } },
2665 /* 16 bit sext */ { { ARM::SXTH , 0, ARM_AM::no_shift, 0 },
2666 /* 16 bit zext */ { ARM::UXTH , 0, ARM_AM::no_shift, 0 } }
2667 },
2668 { // Thumb Opc S Shift Imm
2669 /* 1 bit sext */ { { ARM::KILL , 0, ARM_AM::no_shift, 0 },
2670 /* 1 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 1 } },
2671 /* 8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift, 0 },
2672 /* 8 bit zext */ { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } },
2673 /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift, 0 },
2674 /* 16 bit zext */ { ARM::t2UXTH , 0, ARM_AM::no_shift, 0 } }
2675 }
2676 }
2677 };
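// For example, a sign extension from i8 on a pre-ARMv6 target takes the
// two-instruction path (MOVsi lsl #24 followed by MOVsi asr #24), while on
// ARMv6+ the single-instruction SXTB entry is used instead.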
2678
2679 unsigned SrcBits = SrcVT.getSizeInBits();
2680 unsigned DestBits = DestVT.getSizeInBits();
2681 (void) DestBits;
2682 assert((SrcBits < DestBits) && "can only extend to larger types");
2683 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
2684 "other sizes unimplemented");
2685 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
2686 "other sizes unimplemented");
2687
2688 bool hasV6Ops = Subtarget->hasV6Ops();
2689 unsigned Bitness = SrcBits / 8; // {1,8,16}=>{0,1,2}
2690 assert((Bitness < 3) && "sanity-check table bounds");
2691
2692 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
2693 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
2694 const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
2695 unsigned Opc = ITP->Opc;
2696 assert(ARM::KILL != Opc && "Invalid table entry");
2697 unsigned hasS = ITP->hasS;
2698 ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift;
2699 assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
2700 "only MOVsi has shift operand addressing mode");
2701 unsigned Imm = ITP->Imm;
2702
2703 // 16-bit Thumb instructions always set CPSR (unless they're in an IT block).
2704 bool setsCPSR = &ARM::tGPRRegClass == RC;
2705 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
2706 unsigned ResultReg;
2707 // MOVsi encodes shift and immediate in shift operand addressing mode.
2708 // The following condition has the same value when emitting two
2709 // instruction sequences: both are shifts.
2710 bool ImmIsSO = (Shift != ARM_AM::no_shift);
2711
2712 // Either one or two instructions are emitted.
2713 // They're always of the form:
2714 // dst = in OP imm
2715 // CPSR is set only by 16-bit Thumb instructions.
2716 // Predicate, if any, is AL.
2717 // S bit, if available, is always 0.
2718 // When two are emitted, the first's result feeds the second's input;
2719 // that intermediate value is then dead.
2720 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
2721 for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
2722 ResultReg = createResultReg(RC);
2723 bool isLsl = (0 == Instr) && !isSingleInstr;
2724 unsigned Opcode = isLsl ? LSLOpc : Opc;
2725 ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift;
2726 unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
2727 bool isKill = 1 == Instr;
2728 MachineInstrBuilder MIB = BuildMI(
2729 *FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode), ResultReg);
2730 if (setsCPSR)
2731 MIB.addReg(ARM::CPSR, RegState::Define);
2732 SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
2733 MIB.addReg(SrcReg, isKill * RegState::Kill)
2734 .addImm(ImmEnc)
2735 .add(predOps(ARMCC::AL));
2736 if (hasS)
2737 MIB.add(condCodeOp());
2738 // Second instruction consumes the first's result.
2739 SrcReg = ResultReg;
2740 }
2741
2742 return ResultReg;
2743}
2744
2745bool ARMFastISel::SelectIntExt(const Instruction *I) {
2746 // On ARM, in general, integer casts don't involve legal types; this code
2747 // handles promotable integers.
2748 Type *DestTy = I->getType();
2749 Value *Src = I->getOperand(0);
2750 Type *SrcTy = Src->getType();
2751
2752 bool isZExt = isa<ZExtInst>(I);
2753 Register SrcReg = getRegForValue(Src);
2754 if (!SrcReg) return false;
2755
2756 EVT SrcEVT, DestEVT;
2757 SrcEVT = TLI.getValueType(DL, SrcTy, true);
2758 DestEVT = TLI.getValueType(DL, DestTy, true);
2759 if (!SrcEVT.isSimple()) return false;
2760 if (!DestEVT.isSimple()) return false;
2761
2762 MVT SrcVT = SrcEVT.getSimpleVT();
2763 MVT DestVT = DestEVT.getSimpleVT();
2764 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2765 if (ResultReg == 0) return false;
2766 updateValueMap(I, ResultReg);
2767 return true;
2768}
2769
2770bool ARMFastISel::SelectShift(const Instruction *I,
2771 ARM_AM::ShiftOpc ShiftTy) {
2772 // Thumb2 mode is handled by the target-independent selector
2773 // or by SelectionDAG ISel.
2774 if (isThumb2)
2775 return false;
2776
2777 // Only handle i32 now.
2778 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
2779 if (DestVT != MVT::i32)
2780 return false;
2781
2782 unsigned Opc = ARM::MOVsr;
2783 unsigned ShiftImm;
2784 Value *Src2Value = I->getOperand(1);
2785 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
2786 ShiftImm = CI->getZExtValue();
2787
2788 // Fall back to selection DAG isel if the shift amount
2789 // is zero or greater than the width of the value type.
2790 if (ShiftImm == 0 || ShiftImm >= 32)
2791 return false;
2792
2793 Opc = ARM::MOVsi;
2794 }
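// MOVsi encodes the shift kind and the constant amount in its shift-operand
// immediate; MOVsr instead takes the shift amount from a second register.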
2795
2796 Value *Src1Value = I->getOperand(0);
2797 Register Reg1 = getRegForValue(Src1Value);
2798 if (Reg1 == 0) return false;
2799
2800 unsigned Reg2 = 0;
2801 if (Opc == ARM::MOVsr) {
2802 Reg2 = getRegForValue(Src2Value);
2803 if (Reg2 == 0) return false;
2804 }
2805
2806 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
2807 if (ResultReg == 0) return false;
2808
2809 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2810 TII.get(Opc), ResultReg)
2811 .addReg(Reg1);
2812
2813 if (Opc == ARM::MOVsi)
2814 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
2815 else if (Opc == ARM::MOVsr) {
2816 MIB.addReg(Reg2);
2817 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
2818 }
2819
2820 AddOptionalDefs(MIB);
2821 updateValueMap(I, ResultReg);
2822 return true;
2823}
2824
2825// TODO: SoftFP support.
2826bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
2827 switch (I->getOpcode()) {
2828 case Instruction::Load:
2829 return SelectLoad(I);
2830 case Instruction::Store:
2831 return SelectStore(I);
2832 case Instruction::Br:
2833 return SelectBranch(I);
2834 case Instruction::IndirectBr:
2835 return SelectIndirectBr(I);
2836 case Instruction::ICmp:
2837 case Instruction::FCmp:
2838 return SelectCmp(I);
2839 case Instruction::FPExt:
2840 return SelectFPExt(I);
2841 case Instruction::FPTrunc:
2842 return SelectFPTrunc(I);
2843 case Instruction::SIToFP:
2844 return SelectIToFP(I, /*isSigned*/ true);
2845 case Instruction::UIToFP:
2846 return SelectIToFP(I, /*isSigned*/ false);
2847 case Instruction::FPToSI:
2848 return SelectFPToI(I, /*isSigned*/ true);
2849 case Instruction::FPToUI:
2850 return SelectFPToI(I, /*isSigned*/ false);
2851 case Instruction::Add:
2852 return SelectBinaryIntOp(I, ISD::ADD);
2853 case Instruction::Or:
2854 return SelectBinaryIntOp(I, ISD::OR);
2855 case Instruction::Sub:
2856 return SelectBinaryIntOp(I, ISD::SUB);
2857 case Instruction::FAdd:
2858 return SelectBinaryFPOp(I, ISD::FADD);
2859 case Instruction::FSub:
2860 return SelectBinaryFPOp(I, ISD::FSUB);
2861 case Instruction::FMul:
2862 return SelectBinaryFPOp(I, ISD::FMUL);
2863 case Instruction::SDiv:
2864 return SelectDiv(I, /*isSigned*/ true);
2865 case Instruction::UDiv:
2866 return SelectDiv(I, /*isSigned*/ false);
2867 case Instruction::SRem:
2868 return SelectRem(I, /*isSigned*/ true);
2869 case Instruction::URem:
2870 return SelectRem(I, /*isSigned*/ false);
2871 case Instruction::Call:
2872 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2873 return SelectIntrinsicCall(*II);
2874 return SelectCall(I);
2875 case Instruction::Select:
2876 return SelectSelect(I);
2877 case Instruction::Ret:
2878 return SelectRet(I);
2879 case Instruction::Trunc:
2880 return SelectTrunc(I);
2881 case Instruction::ZExt:
2882 case Instruction::SExt:
2883 return SelectIntExt(I);
2884 case Instruction::Shl:
2885 return SelectShift(I, ARM_AM::lsl);
2886 case Instruction::LShr:
2887 return SelectShift(I, ARM_AM::lsr);
2888 case Instruction::AShr:
2889 return SelectShift(I, ARM_AM::asr);
2890 default: break;
2891 }
2892 return false;
2893}
2894
2895// This table describes sign- and zero-extend instructions which can be
2896// folded into a preceding load. All of these extends have an immediate
2897// (sometimes a mask and sometimes a shift) that's applied after
2898// extension.
2899static const struct FoldableLoadExtendsStruct {
2900 uint16_t Opc[2]; // ARM, Thumb.
2901 uint8_t ExpectedImm;
2902 uint8_t isZExt : 1;
2903 uint8_t ExpectedVT : 7;
2904} FoldableLoadExtends[] = {
2905 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 },
2906 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 },
2907 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 },
2908 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 },
2909 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 }
2910 };
2911
2912/// The specified machine instr operand is a vreg, and that
2913/// vreg is being provided by the specified load instruction. If possible,
2914/// try to fold the load as an operand to the instruction, returning true if
2915/// successful.
2916bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
2917 const LoadInst *LI) {
2918 // Verify we have a legal type before going any further.
2919 MVT VT;
2920 if (!isLoadTypeLegal(LI->getType(), VT))
2921 return false;
2922
2923 // Combine load followed by zero- or sign-extend.
2924 // ldrb r1, [r0] ldrb r1, [r0]
2925 // uxtb r2, r1 =>
2926 // mov r3, r2 mov r3, r1
2927 if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
2928 return false;
2929 const uint64_t Imm = MI->getOperand(2).getImm();
2930
2931 bool Found = false;
2932 bool isZExt;
2933 for (const FoldableLoadExtendsStruct &FLE : FoldableLoadExtends) {
2934 if (FLE.Opc[isThumb2] == MI->getOpcode() &&
2935 (uint64_t)FLE.ExpectedImm == Imm &&
2936 MVT((MVT::SimpleValueType)FLE.ExpectedVT) == VT) {
2937 Found = true;
2938 isZExt = FLE.isZExt;
2939 }
2940 }
2941 if (!Found) return false;
2942
2943 // See if we can handle this address.
2944 Address Addr;
2945 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
2946
2947 Register ResultReg = MI->getOperand(0).getReg();
2948 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlign(), isZExt, false))
2949 return false;
2950 MachineBasicBlock::iterator I(MI);
2951 removeDeadCode(I, std::next(I));
2952 return true;
2953}
2954
2955unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {
2956 bool UseGOT_PREL = !GV->isDSOLocal();
2957 LLVMContext *Context = &MF->getFunction().getContext();
2958 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2959 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
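// PC reads as the address of the current instruction plus 8 in ARM mode and
// plus 4 in Thumb mode, so the constant-pool entry is biased by PCAdj to
// compensate when it is later added to PC.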
2960 ARMConstantPoolConstant *CPV = ARMConstantPoolConstant::Create(
2961 GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
2962 UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
2963 /*AddCurrentAddress=*/UseGOT_PREL);
2964
2965 Align ConstAlign =
2966 MF->getDataLayout().getPrefTypeAlign(PointerType::get(*Context, 0));
2967 unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
2968 MachineMemOperand *CPMMO =
2969 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
2970 MachineMemOperand::MOLoad, 4, Align(4));
2971
2972 Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
2973 unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
2974 MachineInstrBuilder MIB =
2975 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), TempReg)
2976 .addConstantPoolIndex(Idx)
2977 .addMemOperand(CPMMO);
2978 if (Opc == ARM::LDRcp)
2979 MIB.addImm(0);
2980 MIB.add(predOps(ARMCC::AL));
2981
2982 // Fix the address by adding pc.
2983 Register DestReg = createResultReg(TLI.getRegClassFor(VT));
2984 Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
2985 : ARM::PICADD;
2986 DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0);
2987 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)
2988 .addReg(TempReg)
2989 .addImm(ARMPCLabelIndex);
2990
2991 if (!Subtarget->isThumb())
2992 MIB.add(predOps(ARMCC::AL));
2993
2994 if (UseGOT_PREL && Subtarget->isThumb()) {
2995 Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
2996 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2997 TII.get(ARM::t2LDRi12), NewDestReg)
2998 .addReg(DestReg)
2999 .addImm(0);
3000 DestReg = NewDestReg;
3001 AddOptionalDefs(MIB);
3002 }
3003 return DestReg;
3004}
3005
3006bool ARMFastISel::fastLowerArguments() {
3007 if (!FuncInfo.CanLowerReturn)
3008 return false;
3009
3010 const Function *F = FuncInfo.Fn;
3011 if (F->isVarArg())
3012 return false;
3013
3014 CallingConv::ID CC = F->getCallingConv();
3015 switch (CC) {
3016 default:
3017 return false;
3018 case CallingConv::Fast:
3019 case CallingConv::C:
3020 case CallingConv::ARM_AAPCS_VFP:
3021 case CallingConv::ARM_AAPCS:
3022 case CallingConv::ARM_APCS:
3023 case CallingConv::Swift:
3024 case CallingConv::SwiftTail:
3025 break;
3026 }
3027
3028 // Only handle simple cases, i.e. up to 4 i8/i16/i32 scalar arguments
3029 // which are passed in r0 - r3.
3030 for (const Argument &Arg : F->args()) {
3031 if (Arg.getArgNo() >= 4)
3032 return false;
3033
3034 if (Arg.hasAttribute(Attribute::InReg) ||
3035 Arg.hasAttribute(Attribute::StructRet) ||
3036 Arg.hasAttribute(Attribute::SwiftSelf) ||
3037 Arg.hasAttribute(Attribute::SwiftError) ||
3038 Arg.hasAttribute(Attribute::ByVal))
3039 return false;
3040
3041 Type *ArgTy = Arg.getType();
3042 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
3043 return false;
3044
3045 EVT ArgVT = TLI.getValueType(DL, ArgTy);
3046 if (!ArgVT.isSimple()) return false;
3047 switch (ArgVT.getSimpleVT().SimpleTy) {
3048 case MVT::i8:
3049 case MVT::i16:
3050 case MVT::i32:
3051 break;
3052 default:
3053 return false;
3054 }
3055 }
3056
3057 static const MCPhysReg GPRArgRegs[] = {
3058 ARM::R0, ARM::R1, ARM::R2, ARM::R3
3059 };
3060
3061 const TargetRegisterClass *RC = &ARM::rGPRRegClass;
3062 for (const Argument &Arg : F->args()) {
3063 unsigned ArgNo = Arg.getArgNo();
3064 unsigned SrcReg = GPRArgRegs[ArgNo];
3065 Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3066 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
3067 // Without this, EmitLiveInCopies may eliminate the livein if its only
3068 // use is a bitcast (which isn't turned into an instruction).
3069 Register ResultReg = createResultReg(RC);
3070 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3071 TII.get(TargetOpcode::COPY),
3072 ResultReg).addReg(DstReg, getKillRegState(true));
3073 updateValueMap(&Arg, ResultReg);
3074 }
3075
3076 return true;
3077}
3078
3079namespace llvm {
3080
3081 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
3082 const TargetLibraryInfo *libInfo) {
3083 if (funcInfo.MF->getSubtarget<ARMSubtarget>().useFastISel())
3084 return new ARMFastISel(funcInfo, libInfo);
3085
3086 return nullptr;
3087 }
3088
3089} // end namespace llvm
unsigned const MachineRegisterInfo * MRI
static const MCPhysReg GPRArgRegs[]
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred)
static const struct FoldableLoadExtendsStruct FoldableLoadExtends[]
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
uint64_t Addr
std::string Name
uint64_t Size
static bool isSigned(unsigned int Opcode)
This file defines the FastISel class.
static Register UseReg(const MachineOperand &MO)
const HexagonInstrInfo * TII
static MaybeAlign getAlign(Value *Ptr)
Definition: IRBuilder.cpp:531
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
const char LLVMTargetMachineRef TM
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static const unsigned FramePtr
Class for arbitrary precision integers.
Definition: APInt.h:78
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1500
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1522
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
ARMConstantPoolValue - ARM specific constantpool value.
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
bool useFastISel() const
True if fast-isel is used.
an instruction to allocate memory on the stack
Definition: Instructions.h:61
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
CCState - This class holds information needed while lowering arguments and return values.
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
bool needsCustom() const
bool isMemLoc() const
int64_t getLocMemOffset() const
unsigned getValNo() const
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1523
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1385
Value * getCalledOperand() const
Definition: InstrTypes.h:1458
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1391
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1323
unsigned arg_size() const
Definition: InstrTypes.h:1408
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:747
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:757
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:760
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:786
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:787
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:763
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:772
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:761
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:762
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:781
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:780
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:784
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:771
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:765
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:768
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:782
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:769
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:764
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:766
@ ICMP_EQ
equal
Definition: InstrTypes.h:778
@ ICMP_NE
not equal
Definition: InstrTypes.h:779
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:785
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:773
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:783
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:770
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:767
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:871
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:847
bool isUnsigned() const
Definition: InstrTypes.h:1013
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1097
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
const APFloat & getValueAPF() const
Definition: Constants.h:312
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
bool isNegative() const
Definition: Constants.h:201
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition: Constants.h:161
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:155
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
Register fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0)
Emit a MachineInstr with one register operand and a result register in the given register class.
Definition: FastISel.cpp:2035
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:300
Register fastEmitInst_i(unsigned MachineInstOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:2201
virtual unsigned fastMaterializeConstant(const Constant *C)
Emit a constant in a register using target-specific logic, such as constant pool loads.
Definition: FastISel.h:473
Register fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1)
Emit a MachineInstr with two register operands and a result register in the given register class.
Definition: FastISel.cpp:2056
virtual bool fastLowerArguments()
This method is called by target-independent code to do target- specific argument lowering.
Definition: FastISel.cpp:1933
const TargetInstrInfo & TII
Definition: FastISel.h:211
virtual bool fastSelectInstruction(const Instruction *I)=0
This method is called by target-independent code when the normal FastISel process fails to select an ...
const TargetLowering & TLI
Definition: FastISel.h:212
const TargetMachine & TM
Definition: FastISel.h:209
Register fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:2107
virtual unsigned fastMaterializeAlloca(const AllocaInst *C)
Emit an alloca address in a register using target-specific logic.
Definition: FastISel.h:476
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:380
bool isDSOLocal() const
Definition: GlobalValue.h:305
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
Definition: GlobalValue.h:263
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
@ ExternalLinkage
Externally visible function.
Definition: GlobalValue.h:52
Indirect Branch Instruction.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:174
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:209
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
Machine Value Type.
SimpleValueType SimpleTy
bool isVector() const
Return true if this is a vector value type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setFrameAddressIsTaken(bool T)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
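The methods above are normally chained off BuildMI. A sketch of a stack-slot load, assuming the ARM LDRi12 encoding and a 4-byte access size; the helper and its parameter names are invented for illustration.
static void emitStackSlotLoad(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator InsertPt,
                              const DebugLoc &DL, const TargetInstrInfo &TII,
                              Register DestReg, int FrameIndex) {
  MachineFunction &MF = *MBB.getParent();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FrameIndex),
      MachineMemOperand::MOLoad, /*Size=*/4, Align(4));
  BuildMI(MBB, InsertPt, DL, TII.get(ARM::LDRi12), DestReg)
      .addFrameIndex(FrameIndex)   // base address: the stack slot
      .addImm(0)                   // immediate offset within the slot
      .add(predOps(ARMCC::AL))     // unconditional predicate operands
      .addMemOperand(MMO);         // describe the memory access
}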
Representation of each machine instruction.
Definition: MachineInstr.h:69
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
Value * getLength() const
Value * getRawDest() const
MaybeAlign getDestAlign() const
unsigned getDestAddressSpace() const
bool isVolatile() const
This class wraps the llvm.memset and llvm.memset.inline intrinsics.
Value * getRawSource() const
Return the arguments to the instruction.
unsigned getSourceAddressSpace() const
MaybeAlign getSourceAlign() const
This class wraps the llvm.memcpy/memmove intrinsics.
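A sketch of the checks a fast-isel memcpy handler typically performs with the accessors above before expanding the copy inline; the 16-byte threshold and the helper name are illustrative.
static bool shouldInlineMemCpy(const MemTransferInst &MTI) {
  if (MTI.isVolatile())
    return false;                          // leave volatile copies to the libcall
  if (MTI.getDestAddressSpace() != 0 || MTI.getSourceAddressSpace() != 0)
    return false;                          // only handle plain address space 0
  const auto *Len = dyn_cast<ConstantInt>(MTI.getLength());
  return Len && Len->getZExtValue() <= 16; // small, statically-known lengths only
}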
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isValid() const
Definition: Register.h:116
Return a value (possibly void) from a function.
size_t size() const
Definition: SmallVector.h:92
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' parameter.
Definition: SmallVector.h:587
void reserve(size_type N)
Definition: SmallVector.h:677
void push_back(const T &Elt)
Definition: SmallVector.h:427
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1210
Used to lazily calculate structure layout information for a target machine, based on the DataLayout structure.
Definition: DataLayout.h:581
TypeSize getElementOffset(unsigned Idx) const
Definition: DataLayout.h:610
Class to represent struct types.
Definition: DerivedTypes.h:216
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
virtual bool hasStandaloneRem(EVT VT) const
Return true if the target can handle a standalone remainder operation.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
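The two libcall hooks are usually queried together when falling back to a runtime routine, e.g. integer division on cores without a hardware divider. A sketch, where RTLIB::SDIV_I32 names the signed 32-bit division libcall and the helper itself is illustrative:
static bool haveSignedDivLibcall(const TargetLowering &TLI) {
  RTLIB::Libcall LC = RTLIB::SDIV_I32;
  const char *Name = TLI.getLibcallName(LC);           // e.g. "__aeabi_idiv" on EABI
  CallingConv::ID CC = TLI.getLibcallCallingConv(LC);  // convention to use for the call
  (void)CC;                                            // would feed call lowering
  return Name != nullptr;                              // null means "no such routine"
}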
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
bool isPositionIndependent() const
virtual bool supportSplitCSR(MachineFunction *MF) const
Return true if the target supports that a subset of CSRs for the given machine function is handled explicitly via copies.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
Target - Wrapper for Target specific information.
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:261
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:248
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition: Type.h:153
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:245
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition: Type.h:156
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:224
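A sketch of how these predicates commonly map onto ARM's VFP register classes when picking a class for a floating-point value; the helper itself is illustrative.
static const TargetRegisterClass *fpRegClassFor(Type *Ty) {
  if (Ty->isFloatTy())
    return &ARM::SPRRegClass;   // 32-bit float lives in an S register
  if (Ty->isDoubleTy())
    return &ARM::DPRRegClass;   // 64-bit double lives in a D register
  return nullptr;               // integers and vectors are handled elsewhere
}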
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
TypeSize getSequentialElementStride(const DataLayout &DL) const
const ParentTy * getParent() const
Definition: ilist_node.h:32
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ GOT_PREL
Global Offset Table, PC Relative.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which, if indirect, will get special Darwin mangling as a non-lazy-ptr indirect symbol (i.e. "L_FOO$non_lazy_ptr").
Definition: ARMBaseInfo.h:288
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into a shifter_operand immediate operand, return the 12-bit encoding for it. If not, return -1.
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_operand immediate operand, return the 12-bit encoding for it. If not, return -1.
int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
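A sketch of the usual query against these helpers: can a 32-bit constant be encoded as an ARM or Thumb-2 modified immediate? A return of -1 means it cannot, and the constant has to be materialized another way (constant pool, MOVW/MOVT, etc.). The helper name is illustrative.
static bool isEncodableImmediate(uint32_t Imm, bool IsThumb2) {
  int Enc = IsThumb2 ? ARM_AM::getT2SOImmVal(Imm) : ARM_AM::getSOImmVal(Imm);
  return Enc != -1;   // -1 signals "no valid shifter_operand encoding"
}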
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ Swift
Calling convention for Swift.
Definition: CallingConv.h:69
@ ARM_APCS
ARM Procedure Calling Standard (obsolete, but still used on some targets).
Definition: CallingConv.h:107
@ CFGuard_Check
Special calling convention on Windows for calling the Control Guard Check ICall function.
Definition: CallingConv.h:82
@ ARM_AAPCS
ARM Architecture Procedure Calling Standard calling convention (aka EABI).
Definition: CallingConv.h:111
@ CXX_FAST_TLS
Used for access functions.
Definition: CallingConv.h:72
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition: CallingConv.h:50
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition: CallingConv.h:41
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
Definition: CallingConv.h:87
@ ARM_AAPCS_VFP
Same as ARM_AAPCS, but uses hard floating point ABI.
Definition: CallingConv.h:114
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:397
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
Definition: ISDOpcodes.h:953
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
bool CC_ARM_APCS_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool RetCC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed as an argument (GPR, FPR, etc.).
Definition: Utils.cpp:56
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool FastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool RetCC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
bool CC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool CC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool RetCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool CC_ARM_Win32_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
unsigned getKillRegState(bool B)
bool RetFastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool CC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
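These assignment functions are not called directly; a CCState drives them over the argument list and records where each value lives. A sketch, with CC_ARM_AAPCS chosen for illustration and the helper name invented:
static void analyzeCallArgs(MachineFunction &MF, LLVMContext &Ctx,
                            SmallVectorImpl<MVT> &ArgVTs,
                            SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallingConv::C, /*IsVarArg=*/false, MF, ArgLocs, Ctx);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_ARM_AAPCS);
  for (const CCValAssign &VA : ArgLocs) {
    if (VA.isRegLoc()) {
      // Argument is passed in VA.getLocReg(); emit a copy into that register.
    } else {
      // Argument is passed on the stack at VA.getLocMemOffset().
    }
  }
}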
static MachineOperand t1CondCodeOp(bool isDead=false)
Get the operand corresponding to the conditional code result for Thumb1.
gep_type_iterator gep_type_begin(const User *GEP)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
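predOps supplies the predicate operands every predicable ARM/Thumb-2 instruction carries, while condCodeOp supplies the optional CPSR definition of flag-setting instructions. A sketch adding 1 to a register without setting flags; the helper and its names are illustrative.
static void emitAddOne(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
                       const DebugLoc &DL, const TargetInstrInfo &TII,
                       Register DestReg, Register SrcReg) {
  BuildMI(MBB, InsertPt, DL, TII.get(ARM::ADDri), DestReg)
      .addReg(SrcReg)
      .addImm(1)
      .add(predOps(ARMCC::AL))   // always execute (no condition)
      .add(condCodeOp());        // do not define CPSR (non flag-setting form)
}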
unsigned gettBLXrOpcode(const MachineFunction &MF)
unsigned getBLXOpcode(const MachineFunction &MF)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:137
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:307
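The usual fast-isel type check chains these together: map the IR type to an EVT, bail out on anything that is not a simple MVT, and ask the target whether the type is legal. A sketch with an invented helper name:
static bool getSimpleLegalVT(const TargetLowering &TLI, const DataLayout &DL,
                             Type *Ty, MVT &VT) {
  EVT Evt = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
  if (!Evt.isSimple())
    return false;               // extended/unknown types go to SelectionDAG
  VT = Evt.getSimpleVT();
  return TLI.isTypeLegal(Evt);  // e.g. i32, f32, f64 are legal on ARM
}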
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141