LLVM 17.0.0git
X86RegisterInfo.cpp
Go to the documentation of this file.
1//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the X86 implementation of the TargetRegisterInfo class.
10// This file is responsible for the frame pointer elimination optimization
11// on X86.
12//
13//===----------------------------------------------------------------------===//
14
15#include "X86RegisterInfo.h"
16#include "X86FrameLowering.h"
18#include "X86Subtarget.h"
19#include "llvm/ADT/BitVector.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
31#include "llvm/IR/Constants.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/Type.h"
38
39using namespace llvm;
40
41#define GET_REGINFO_TARGET_DESC
42#include "X86GenRegisterInfo.inc"
43
// Command-line escape hatch (on by default): allow reserving a dedicated
// base-pointer register for frames that need both SP- and FP-relative
// addressing (dynamic allocas + stack realignment). See hasBasePointer().
44static cl::opt<bool>
45EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
46 cl::desc("Enable use of a base pointer for complex stack frames"));
47
// X86RegisterInfo constructor (the declaration line itself is missing from
// this extract). Seeds the TableGen base class with the program-counter
// register and the DWARF register-numbering flavours, then caches the
// target-dependent stack/frame/base pointer choices.
49 : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
50 X86_MC::getDwarfRegFlavour(TT, false),
51 X86_MC::getDwarfRegFlavour(TT, true),
52 (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
54
55 // Cache some information.
56 Is64Bit = TT.isArch64Bit();
57 IsWin64 = Is64Bit && TT.isOSWindows();
58
59 // Use a callee-saved register as the base pointer. These registers must
60 // not conflict with any ABI requirements. For example, in 32-bit mode PIC
61 // requires GOT in the EBX register before function calls via PLT GOT pointer.
62 if (Is64Bit) {
63 SlotSize = 8;
64 // This matches the simplified 32-bit pointer code in the data layout
65 // computation.
66 // FIXME: Should use the data layout?
67 bool Use64BitReg = !TT.isX32();
68 StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
69 FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
70 BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
71 } else {
72 SlotSize = 4;
73 StackPtr = X86::ESP;
74 FramePtr = X86::EBP;
75 BasePtr = X86::ESI;
76 }
77}
78
79int
// NOTE(review): the signature line is missing from this extract; the body
// simply forwards to the TableGen-generated encoding value for register `i`,
// presumably X86RegisterInfo::getSEHRegNum — confirm against the header.
81 return getEncodingValue(i);
82}
83
// getSubClassWithSubReg (leading signature line missing from this extract):
// returns the largest sub-class of RC whose registers all support sub-register
// index Idx, tightening sub_8bit to sub_8bit_hi in 32-bit mode.
86 unsigned Idx) const {
87 // The sub_8bit sub-register index is more constrained in 32-bit mode.
88 // It behaves just like the sub_8bit_hi index.
89 if (!Is64Bit && Idx == X86::sub_8bit)
90 Idx = X86::sub_8bit_hi;
91
92 // Forward to TableGen's default version.
93 return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
94}
95
// getMatchingSuperRegClass (leading signature line missing from this
// extract): applies the same 32-bit sub_8bit restriction as
// getSubClassWithSubReg before deferring to the generated implementation.
99 unsigned SubIdx) const {
100 // The sub_8bit sub-register index is more constrained in 32-bit mode.
101 if (!Is64Bit && SubIdx == X86::sub_8bit) {
102 A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
103 if (!A)
104 return nullptr;
105 }
106 return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
107}
108
// getLargestLegalSuperClass (leading signature line missing from this
// extract): walks RC's super-class chain and returns the largest legal
// super-class the register allocator may inflate to, gated on subtarget
// features (AVX-512/VLX) and on not shrinking the spill size.
111 const MachineFunction &MF) const {
112 // Don't allow super-classes of GR8_NOREX. This class is only used after
113 // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
114 // to the full GR8 register class in 64-bit mode, so we cannot allow the
115 // register class inflation.
116 //
117 // The GR8_NOREX class is always used in a way that won't be constrained to a
118 // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
119 // full GR8 class.
120 if (RC == &X86::GR8_NOREXRegClass)
121 return RC;
122
123 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
124
125 const TargetRegisterClass *Super = RC;
// NOTE(review): the declaration of the super-class iterator `I` used below
// (advanced via `*I++`) was dropped by the extraction — confirm upstream.
127 do {
128 switch (Super->getID()) {
129 case X86::FR32RegClassID:
130 case X86::FR64RegClassID:
131 // If AVX-512 isn't supported we should only inflate to these classes.
132 if (!Subtarget.hasAVX512() &&
133 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
134 return Super;
135 break;
136 case X86::VR128RegClassID:
137 case X86::VR256RegClassID:
138 // If VLX isn't supported we should only inflate to these classes.
139 if (!Subtarget.hasVLX() &&
140 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
141 return Super;
142 break;
143 case X86::VR128XRegClassID:
144 case X86::VR256XRegClassID:
145 // If VLX isn't supported we shouldn't inflate to these classes.
146 if (Subtarget.hasVLX() &&
147 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
148 return Super;
149 break;
150 case X86::FR32XRegClassID:
151 case X86::FR64XRegClassID:
152 // If AVX-512 isn't supported we shouldn't inflate to these classes.
153 if (Subtarget.hasAVX512() &&
154 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
155 return Super;
156 break;
157 case X86::GR8RegClassID:
158 case X86::GR16RegClassID:
159 case X86::GR32RegClassID:
160 case X86::GR64RegClassID:
161 case X86::RFP32RegClassID:
162 case X86::RFP64RegClassID:
163 case X86::RFP80RegClassID:
164 case X86::VR512_0_15RegClassID:
165 case X86::VR512RegClassID:
166 // Don't return a super-class that would shrink the spill size.
167 // That can happen with the vector and float classes.
168 if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
169 return Super;
170 }
171 Super = *I++;
172 } while (Super);
173 return RC;
174}
175
// getPointerRegClass (leading signature line missing from this extract):
// maps an abstract pointer-class "Kind" to the concrete GPR class suitable
// for address computation under the current ABI (LP64 vs. ILP32 vs. 32-bit).
178 unsigned Kind) const {
179 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
180 switch (Kind) {
181 default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
182 case 0: // Normal GPRs.
183 if (Subtarget.isTarget64BitLP64())
184 return &X86::GR64RegClass;
185 // If the target is 64bit but we have been told to use 32bit addresses,
186 // we can still use 64-bit register as long as we know the high bits
187 // are zeros.
188 // Reflect that in the returned register class.
189 if (Is64Bit) {
190 // When the target also allows 64-bit frame pointer and we do have a
191 // frame, this is fine to use it for the address accesses as well.
192 const X86FrameLowering *TFI = getFrameLowering(MF);
193 return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
194 ? &X86::LOW32_ADDR_ACCESS_RBPRegClass
195 : &X86::LOW32_ADDR_ACCESSRegClass;
196 }
197 return &X86::GR32RegClass;
198 case 1: // Normal GPRs except the stack pointer (for encoding reasons).
199 if (Subtarget.isTarget64BitLP64())
200 return &X86::GR64_NOSPRegClass;
201 // NOSP does not contain RIP, so no special case here.
202 return &X86::GR32_NOSPRegClass;
203 case 2: // NOREX GPRs.
204 if (Subtarget.isTarget64BitLP64())
205 return &X86::GR64_NOREXRegClass;
206 return &X86::GR32_NOREXRegClass;
207 case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
208 if (Subtarget.isTarget64BitLP64())
209 return &X86::GR64_NOREX_NOSPRegClass;
210 // NOSP does not contain RIP, so no special case here.
211 return &X86::GR32_NOREX_NOSPRegClass;
212 case 4: // Available for tailcall (not callee-saved GPRs).
213 return getGPRsForTailCall(MF);
214 }
215}
216
// shouldRewriteCopySrc (leading signature line missing from this extract):
// vetoes rewriting a full GR64 def from a sub_32bit source, where the
// destination would be wider than the input (PR41619); otherwise defers to
// the target-independent default.
218 unsigned DefSubReg,
219 const TargetRegisterClass *SrcRC,
220 unsigned SrcSubReg) const {
221 // Prevent rewriting a copy where the destination size is larger than the
222 // input size. See PR41619.
223 // FIXME: Should this be factored into the base implementation somehow.
224 if (DefRC->hasSuperClassEq(&X86::GR64RegClass) && DefSubReg == 0 &&
225 SrcRC->hasSuperClassEq(&X86::GR64RegClass) && SrcSubReg == X86::sub_32bit)
226 return false;
227
228 return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
229 SrcRC, SrcSubReg);
230}
231
// getGPRsForTailCall (leading signature line missing from this extract):
// selects the tail-call-safe GPR class based on bitness and calling
// convention (Win64 / HiPE get special classes).
234 const Function &F = MF.getFunction();
235 if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
236 return &X86::GR64_TCW64RegClass;
237 else if (Is64Bit)
238 return &X86::GR64_TCRegClass;
239
240 bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
241 if (hasHipeCC)
242 return &X86::GR32RegClass;
243 return &X86::GR32_TCRegClass;
244}
245
// getCrossCopyRegClass (leading signature line missing from this extract):
// EFLAGS (CCR) cannot be copied directly, so route such copies through a GPR
// of the native width; all other classes copy as themselves.
248 if (RC == &X86::CCRRegClass) {
249 if (Is64Bit)
250 return &X86::GR64RegClass;
251 else
252 return &X86::GR32RegClass;
253 }
254 return RC;
255}
256
257unsigned
// getRegPressureLimit (signature line missing from this extract): returns a
// rough count of allocatable registers in RC, deducting one GPR when a frame
// pointer is reserved; 0 means "no limit known" for other classes.
259 MachineFunction &MF) const {
260 const X86FrameLowering *TFI = getFrameLowering(MF);
261
262 unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
263 switch (RC->getID()) {
264 default:
265 return 0;
266 case X86::GR32RegClassID:
267 return 4 - FPDiff;
268 case X86::GR64RegClassID:
269 return 12 - FPDiff;
270 case X86::VR128RegClassID:
271 return Is64Bit ? 10 : 4;
272 case X86::VR64RegClassID:
273 return 4;
274 }
275}
276
277const MCPhysReg *
// getCalleeSavedRegs (signature line missing from this extract): picks the
// callee-saved register list for the function's calling convention and
// subtarget features.
// NOTE(review): the extraction dropped several `case CallingConv::...:`
// labels inside the switch below (visible as gaps in the original line
// numbering), plus the statement following the no_caller_saved_registers
// check — consult the upstream file before editing this logic.
279 assert(MF && "MachineFunction required");
280
281 const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
282 const Function &F = MF->getFunction();
283 bool HasSSE = Subtarget.hasSSE1();
284 bool HasAVX = Subtarget.hasAVX();
285 bool HasAVX512 = Subtarget.hasAVX512();
286 bool CallsEHReturn = MF->callsEHReturn();
287
288 CallingConv::ID CC = F.getCallingConv();
289
290 // If attribute NoCallerSavedRegisters exists then we set X86_INTR calling
291 // convention because it has the CSR list.
292 if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
294
295 // If attribute specified, override the CSRs normally specified by the
296 // calling convention and use the empty set instead.
297 if (MF->getFunction().hasFnAttribute("no_callee_saved_registers"))
298 return CSR_NoRegs_SaveList;
299
300 switch (CC) {
301 case CallingConv::GHC:
303 return CSR_NoRegs_SaveList;
305 if (HasAVX)
306 return CSR_64_AllRegs_AVX_SaveList;
307 return CSR_64_AllRegs_SaveList;
309 return CSR_64_RT_MostRegs_SaveList;
311 if (HasAVX)
312 return CSR_64_RT_AllRegs_AVX_SaveList;
313 return CSR_64_RT_AllRegs_SaveList;
315 if (Is64Bit)
316 return MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR() ?
317 CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;
318 break;
320 if (HasAVX512 && IsWin64)
321 return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
322 if (HasAVX512 && Is64Bit)
323 return CSR_64_Intel_OCL_BI_AVX512_SaveList;
324 if (HasAVX && IsWin64)
325 return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
326 if (HasAVX && Is64Bit)
327 return CSR_64_Intel_OCL_BI_AVX_SaveList;
328 if (!HasAVX && !IsWin64 && Is64Bit)
329 return CSR_64_Intel_OCL_BI_SaveList;
330 break;
331 }
333 if (Is64Bit) {
334 if (IsWin64) {
335 return (HasSSE ? CSR_Win64_RegCall_SaveList :
336 CSR_Win64_RegCall_NoSSE_SaveList);
337 } else {
338 return (HasSSE ? CSR_SysV64_RegCall_SaveList :
339 CSR_SysV64_RegCall_NoSSE_SaveList);
340 }
341 } else {
342 return (HasSSE ? CSR_32_RegCall_SaveList :
343 CSR_32_RegCall_NoSSE_SaveList);
344 }
346 assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
347 return (HasSSE ? CSR_Win32_CFGuard_Check_SaveList
348 : CSR_Win32_CFGuard_Check_NoSSE_SaveList);
350 if (Is64Bit)
351 return CSR_64_MostRegs_SaveList;
352 break;
354 if (!HasSSE)
355 return CSR_Win64_NoSSE_SaveList;
356 return CSR_Win64_SaveList;
358 if (!Is64Bit)
359 return CSR_32_SaveList;
360 return IsWin64 ? CSR_Win64_SwiftTail_SaveList : CSR_64_SwiftTail_SaveList;
362 if (CallsEHReturn)
363 return CSR_64EHRet_SaveList;
364 return CSR_64_SaveList;
366 if (Is64Bit) {
367 if (HasAVX512)
368 return CSR_64_AllRegs_AVX512_SaveList;
369 if (HasAVX)
370 return CSR_64_AllRegs_AVX_SaveList;
371 if (HasSSE)
372 return CSR_64_AllRegs_SaveList;
373 return CSR_64_AllRegs_NoSSE_SaveList;
374 } else {
375 if (HasAVX512)
376 return CSR_32_AllRegs_AVX512_SaveList;
377 if (HasAVX)
378 return CSR_32_AllRegs_AVX_SaveList;
379 if (HasSSE)
380 return CSR_32_AllRegs_SSE_SaveList;
381 return CSR_32_AllRegs_SaveList;
382 }
383 default:
384 break;
385 }
386
387 if (Is64Bit) {
388 bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
389 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
390 if (IsSwiftCC)
391 return IsWin64 ? CSR_Win64_SwiftError_SaveList
392 : CSR_64_SwiftError_SaveList;
393
394 if (IsWin64)
395 return HasSSE ? CSR_Win64_SaveList : CSR_Win64_NoSSE_SaveList;
396 if (CallsEHReturn)
397 return CSR_64EHRet_SaveList;
398 return CSR_64_SaveList;
399 }
400
401 return CallsEHReturn ? CSR_32EHRet_SaveList : CSR_32_SaveList;
402}
403
// getCalleeSavedRegsViaCopy (leading lines missing from this extract): the
// condition guarding the return on the dropped line 407/408 presumably tests
// for the Darwin CXX_TLS split-CSR case — confirm upstream.
405 const MachineFunction *MF) const {
406 assert(MF && "Invalid MachineFunction pointer.");
409 return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
410 return nullptr;
411}
412
413const uint32_t *
// getCallPreservedMask: register mask (preserved-across-call bits) for the
// given calling convention; mirrors getCalleeSavedRegs but returns RegMasks.
// NOTE(review): several `case CallingConv::...:` labels in the switch were
// dropped by the extraction (gaps in the original numbering) — consult the
// upstream file before editing.
415 CallingConv::ID CC) const {
416 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
417 bool HasSSE = Subtarget.hasSSE1();
418 bool HasAVX = Subtarget.hasAVX();
419 bool HasAVX512 = Subtarget.hasAVX512();
420
421 switch (CC) {
422 case CallingConv::GHC:
424 return CSR_NoRegs_RegMask;
426 if (HasAVX)
427 return CSR_64_AllRegs_AVX_RegMask;
428 return CSR_64_AllRegs_RegMask;
430 return CSR_64_RT_MostRegs_RegMask;
432 if (HasAVX)
433 return CSR_64_RT_AllRegs_AVX_RegMask;
434 return CSR_64_RT_AllRegs_RegMask;
436 if (Is64Bit)
437 return CSR_64_TLS_Darwin_RegMask;
438 break;
440 if (HasAVX512 && IsWin64)
441 return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
442 if (HasAVX512 && Is64Bit)
443 return CSR_64_Intel_OCL_BI_AVX512_RegMask;
444 if (HasAVX && IsWin64)
445 return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
446 if (HasAVX && Is64Bit)
447 return CSR_64_Intel_OCL_BI_AVX_RegMask;
448 if (!HasAVX && !IsWin64 && Is64Bit)
449 return CSR_64_Intel_OCL_BI_RegMask;
450 break;
451 }
453 if (Is64Bit) {
454 if (IsWin64) {
455 return (HasSSE ? CSR_Win64_RegCall_RegMask :
456 CSR_Win64_RegCall_NoSSE_RegMask);
457 } else {
458 return (HasSSE ? CSR_SysV64_RegCall_RegMask :
459 CSR_SysV64_RegCall_NoSSE_RegMask);
460 }
461 } else {
462 return (HasSSE ? CSR_32_RegCall_RegMask :
463 CSR_32_RegCall_NoSSE_RegMask);
464 }
466 assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
467 return (HasSSE ? CSR_Win32_CFGuard_Check_RegMask
468 : CSR_Win32_CFGuard_Check_NoSSE_RegMask);
470 if (Is64Bit)
471 return CSR_64_MostRegs_RegMask;
472 break;
474 return CSR_Win64_RegMask;
476 if (!Is64Bit)
477 return CSR_32_RegMask;
478 return IsWin64 ? CSR_Win64_SwiftTail_RegMask : CSR_64_SwiftTail_RegMask;
480 return CSR_64_RegMask;
482 if (Is64Bit) {
483 if (HasAVX512)
484 return CSR_64_AllRegs_AVX512_RegMask;
485 if (HasAVX)
486 return CSR_64_AllRegs_AVX_RegMask;
487 if (HasSSE)
488 return CSR_64_AllRegs_RegMask;
489 return CSR_64_AllRegs_NoSSE_RegMask;
490 } else {
491 if (HasAVX512)
492 return CSR_32_AllRegs_AVX512_RegMask;
493 if (HasAVX)
494 return CSR_32_AllRegs_AVX_RegMask;
495 if (HasSSE)
496 return CSR_32_AllRegs_SSE_RegMask;
497 return CSR_32_AllRegs_RegMask;
498 }
499 default:
500 break;
501 }
502
503 // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
504 // callsEHReturn().
505 if (Is64Bit) {
506 const Function &F = MF.getFunction();
507 bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
508 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
509 if (IsSwiftCC)
510 return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;
511
512 return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
513 }
514
515 return CSR_32_RegMask;
516}
517
518const uint32_t*
// getNoPreservedMask (signature line missing from this extract): a mask
// declaring that nothing is preserved across the call.
520 return CSR_NoRegs_RegMask;
521}
522
// Body fragment (signature line missing from this extract): returns the
// Darwin TLS call preserved-register mask.
524 return CSR_64_TLS_Darwin_RegMask;
525}
526
// getReservedRegs body (signature line missing from this extract): builds
// the BitVector of registers the allocator must never touch — control/status
// registers, SP/IP (and aliases), FP/base pointer when in use, segment and
// x87 stack registers, and registers that don't exist on this subtarget.
528 BitVector Reserved(getNumRegs());
529 const X86FrameLowering *TFI = getFrameLowering(MF);
530
531 // Set the floating point control register as reserved.
532 Reserved.set(X86::FPCW);
533
534 // Set the floating point status register as reserved.
535 Reserved.set(X86::FPSW);
536
537 // Set the SIMD floating point control register as reserved.
538 Reserved.set(X86::MXCSR);
539
540 // Set the stack-pointer register and its aliases as reserved.
541 for (const MCPhysReg &SubReg : subregs_inclusive(X86::RSP))
542 Reserved.set(SubReg);
543
544 // Set the Shadow Stack Pointer as reserved.
545 Reserved.set(X86::SSP);
546
547 // Set the instruction pointer register and its aliases as reserved.
548 for (const MCPhysReg &SubReg : subregs_inclusive(X86::RIP))
549 Reserved.set(SubReg);
550
551 // Set the frame-pointer register and its aliases as reserved if needed.
552 if (TFI->hasFP(MF)) {
553 for (const MCPhysReg &SubReg : subregs_inclusive(X86::RBP))
554 Reserved.set(SubReg);
555 }
556
557 // Set the base-pointer register and its aliases as reserved if needed.
558 if (hasBasePointer(MF)) {
// NOTE(review): the extraction dropped the lines that define CC and the
// assert condition consuming RegMask below — confirm upstream before edits.
560 const uint32_t *RegMask = getCallPreservedMask(MF, CC);
563 "Stack realignment in presence of dynamic allocas is not supported with"
564 "this calling convention.");
565
567 for (const MCPhysReg &SubReg : subregs_inclusive(BasePtr))
568 Reserved.set(SubReg);
569 }
570
571 // Mark the segment registers as reserved.
572 Reserved.set(X86::CS);
573 Reserved.set(X86::SS);
574 Reserved.set(X86::DS);
575 Reserved.set(X86::ES);
576 Reserved.set(X86::FS);
577 Reserved.set(X86::GS);
578
579 // Mark the floating point stack registers as reserved.
580 for (unsigned n = 0; n != 8; ++n)
581 Reserved.set(X86::ST0 + n);
582
583 // Reserve the registers that only exist in 64-bit mode.
584 if (!Is64Bit) {
585 // These 8-bit registers are part of the x86-64 extension even though their
586 // super-registers are old 32-bits.
587 Reserved.set(X86::SIL);
588 Reserved.set(X86::DIL);
589 Reserved.set(X86::BPL);
590 Reserved.set(X86::SPL);
591 Reserved.set(X86::SIH);
592 Reserved.set(X86::DIH);
593 Reserved.set(X86::BPH);
594 Reserved.set(X86::SPH);
595
596 for (unsigned n = 0; n != 8; ++n) {
597 // R8, R9, ...
598 for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
599 Reserved.set(*AI);
600
601 // XMM8, XMM9, ...
602 for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
603 Reserved.set(*AI);
604 }
605 }
606 if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
607 for (unsigned n = 16; n != 32; ++n) {
608 for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
609 Reserved.set(*AI);
610 }
611 }
612
613 assert(checkAllSuperRegsMarked(Reserved,
614 {X86::SIL, X86::DIL, X86::BPL, X86::SPL,
615 X86::SIH, X86::DIH, X86::BPH, X86::SPH}));
616 return Reserved;
617}
618
// isArgumentRegister (leading signature line missing from this extract):
// returns true if Reg (or any alias) can carry an argument under the
// function's calling convention; checks fixed GPR/XMM argument sets per ABI.
620 MCRegister Reg) const {
621 const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
622 const TargetRegisterInfo &TRI = *ST.getRegisterInfo();
623 auto IsSubReg = [&](MCRegister RegA, MCRegister RegB) {
624 return TRI.isSuperOrSubRegisterEq(RegA, RegB);
625 };
626
627 if (!ST.is64Bit())
628 return llvm::any_of(
629 SmallVector<MCRegister>{X86::EAX, X86::ECX, X86::EDX},
630 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }) ||
631 (ST.hasMMX() && X86::VR64RegClass.contains(Reg));
632
// NOTE(review): the declaration of CC (line 633) was dropped by the
// extraction — presumably the function's CallingConv::ID; confirm upstream.
634
635 if (CC == CallingConv::X86_64_SysV && IsSubReg(X86::RAX, Reg))
636 return true;
637
638 if (llvm::any_of(
639 SmallVector<MCRegister>{X86::RDX, X86::RCX, X86::R8, X86::R9},
640 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
641 return true;
642
643 if (CC != CallingConv::Win64 &&
644 llvm::any_of(SmallVector<MCRegister>{X86::RDI, X86::RSI},
645 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
646 return true;
647
648 if (ST.hasSSE1() &&
649 llvm::any_of(SmallVector<MCRegister>{X86::XMM0, X86::XMM1, X86::XMM2,
650 X86::XMM3, X86::XMM4, X86::XMM5,
651 X86::XMM6, X86::XMM7},
652 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
653 return true;
654
655 return X86GenRegisterInfo::isArgumentRegister(MF, Reg);
656}
657
// isFixedRegister (leading signature line missing from this extract): a
// register is "fixed" if it aliases RSP, or RBP while a frame pointer is in
// use; otherwise defer to the TableGen default.
659 MCRegister PhysReg) const {
660 const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
661 const TargetRegisterInfo &TRI = *ST.getRegisterInfo();
662
663 // Stack pointer.
664 if (TRI.isSuperOrSubRegisterEq(X86::RSP, PhysReg))
665 return true;
666
667 // Don't use the frame pointer if it's being used.
668 const X86FrameLowering &TFI = *getFrameLowering(MF);
669 if (TFI.hasFP(MF) && TRI.isSuperOrSubRegisterEq(X86::RBP, PhysReg))
670 return true;
671
672 return X86GenRegisterInfo::isFixedRegister(MF, PhysReg);
673}
674
// Body fragment (signature line missing from this extract): true iff RC is
// the AMX tile register class.
676 return RC->getID() == X86::TILERegClassID;
677}
678
// adjustStackMapLiveOutMask body (signature line missing from this extract):
// scrubs EFLAGS and the instruction-pointer registers from a stackmap
// live-out mask, since neither needs preserving across a patchpoint.
680 // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
681 // because the calling convention defines the EFLAGS register as NOT
682 // preserved.
683 //
684 // Unfortunately the EFLAGS show up as live-out after branch folding. Adding
685 // an assert to track this and clear the register afterwards to avoid
686 // unnecessary crashes during release builds.
687 assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
688 "EFLAGS are not live-out from a patchpoint.");
689
690 // Also clean other registers that don't need preserving (IP).
691 for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
692 Mask[Reg / 32] &= ~(1U << (Reg % 32));
693}
694
695//===----------------------------------------------------------------------===//
696// Stack Frame Processing methods
697//===----------------------------------------------------------------------===//
698
699static bool CantUseSP(const MachineFrameInfo &MFI) {
700 return MFI.hasVarSizedObjects() || MFI.hasOpaqueSPAdjustment();
701}
702
// hasBasePointer body (signature and the X86MachineFunctionInfo lookup line
// are missing from this extract): decides whether a dedicated base-pointer
// register is required — i.e. when stack realignment forbids FP addressing
// AND dynamic allocas/opaque SP adjustments forbid SP addressing.
// NOTE(review): the dropped line 715 presumably checks the EnableBasePointer
// cl::opt before the early `return false` — confirm upstream.
705 // We have a virtual register to reference argument, and don't need base
706 // pointer.
707 if (X86FI->getStackPtrSaveMI() != nullptr)
708 return false;
709
710 if (X86FI->hasPreallocatedCall())
711 return true;
712
713 const MachineFrameInfo &MFI = MF.getFrameInfo();
714
716 return false;
717
718 // When we need stack realignment, we can't address the stack from the frame
719 // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
720 // can't address variables from the stack pointer. MS inline asm can
721 // reference locals while also adjusting the stack pointer. When we can't
722 // use both the SP and the FP, we need a separate base pointer register.
723 bool CantUseFP = hasStackRealignment(MF);
724 return CantUseFP && CantUseSP(MFI);
725}
726
// canRealignStack body (signature and the base-class check on the dropped
// leading lines are missing from this extract): realignment additionally
// requires that the frame pointer — and, if needed, the base pointer — can
// still be reserved this late in register allocation.
729 return false;
730
731 const MachineFrameInfo &MFI = MF.getFrameInfo();
732 const MachineRegisterInfo *MRI = &MF.getRegInfo();
733
734 // Stack realignment requires a frame pointer. If we already started
735 // register allocation with frame pointer elimination, it is too late now.
736 if (!MRI->canReserveReg(FramePtr))
737 return false;
738
739 // If a base pointer is necessary. Check that it isn't too late to reserve
740 // it.
741 if (CantUseSP(MFI))
742 return MRI->canReserveReg(BasePtr);
743 return true;
744}
745
746// tryOptimizeLEAtoMOV - helper function that tries to replace a LEA instruction
747// of the form 'lea (%esp), %ebx' --> 'mov %esp, %ebx'.
748// TODO: In this case we should be really trying first to entirely eliminate
749// this instruction which is a plain copy.
// (The static function signature line is missing from this extract.)
// Returns true and erases MI if the LEA had scale 1, no index, and zero
// displacement, replacing it with a register copy.
751 MachineInstr &MI = *II;
752 unsigned Opc = II->getOpcode();
753 // Check if this is a LEA of the form 'lea (%esp), %ebx'
754 if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||
755 MI.getOperand(2).getImm() != 1 ||
756 MI.getOperand(3).getReg() != X86::NoRegister ||
757 MI.getOperand(4).getImm() != 0 ||
758 MI.getOperand(5).getReg() != X86::NoRegister)
759 return false;
760 Register BasePtr = MI.getOperand(1).getReg();
761 // In X32 mode, ensure the base-pointer is a 32-bit operand, so the LEA will
762 // be replaced with a 32-bit operand MOV which will zero extend the upper
763 // 32-bits of the super register.
764 if (Opc == X86::LEA64_32r)
765 BasePtr = getX86SubSuperRegister(BasePtr, 32);
766 Register NewDestReg = MI.getOperand(0).getReg();
767 const X86InstrInfo *TII =
768 MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
769 TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
770 MI.getOperand(1).isKill());
771 MI.eraseFromParent();
772 return true;
773}
774
// isFuncletReturnInstr body (static signature line missing from this
// extract): true only for EH funclet return pseudo-instructions.
776 switch (MI.getOpcode()) {
777 case X86::CATCHRET:
778 case X86::CLEANUPRET:
779 return true;
780 default:
781 return false;
782 }
783 llvm_unreachable("impossible");
784}
785
// eliminateFrameIndex overload taking an explicit BaseReg/FIOffset (leading
// signature line missing from this extract): rewrites the frame-index operand
// of MI to BaseReg plus FIOffset, with special handling for LOCAL_ESCAPE and
// stackmap/patchpoint operand layouts.
787 unsigned FIOperandNum,
788 Register BaseReg,
789 int FIOffset) const {
790 MachineInstr &MI = *II;
791 unsigned Opc = MI.getOpcode();
792 if (Opc == TargetOpcode::LOCAL_ESCAPE) {
793 MachineOperand &FI = MI.getOperand(FIOperandNum);
794 FI.ChangeToImmediate(FIOffset);
795 return;
796 }
797
798 MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
799
800 // The frame index format for stackmaps and patchpoints is different from the
801 // X86 format. It only has a FI and an offset.
802 if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
803 assert(BasePtr == FramePtr && "Expected the FP as base register");
804 int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
805 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
806 return;
807 }
808
809 if (MI.getOperand(FIOperandNum + 3).isImm()) {
810 // Offset is a 32-bit integer.
811 int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
812 int Offset = FIOffset + Imm;
813 assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
814 "Requesting 64-bit offset in 32-bit immediate!");
815 if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
816 MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
817 } else {
818 // Offset is symbolic. This is extremely rare.
// NOTE(review): the declaration of Offset (line 819) was dropped by the
// extraction — presumably `uint64_t Offset =`; confirm upstream.
820 FIOffset + (uint64_t)MI.getOperand(FIOperandNum + 3).getOffset();
821 MI.getOperand(FIOperandNum + 3).setOffset(Offset);
822 }
823}
824
825bool
// Main eliminateFrameIndex (signature line missing from this extract):
// computes the base register and offset for a frame index — SP-relative for
// returns, Win64 EH funclet frames specially — then rewrites the memory
// operands, optionally turning a trivial LEA into a MOV.
827 int SPAdj, unsigned FIOperandNum,
828 RegScavenger *RS) const {
829 MachineInstr &MI = *II;
830 MachineBasicBlock &MBB = *MI.getParent();
// NOTE(review): the lines defining MF, MBBI, and the isFuncletReturnInstr
// half of the ternary below were dropped by the extraction — confirm
// upstream before modifying this region.
833 bool IsEHFuncletEpilogue = MBBI == MBB.end() ? false
835 const X86FrameLowering *TFI = getFrameLowering(MF);
836 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
837
838 // Determine base register and offset.
839 int FIOffset;
840 Register BasePtr;
841 if (MI.isReturn()) {
842 assert((!hasStackRealignment(MF) ||
843 MF.getFrameInfo().isFixedObjectIndex(FrameIndex)) &&
844 "Return instruction can only reference SP relative frame objects");
845 FIOffset =
846 TFI->getFrameIndexReferenceSP(MF, FrameIndex, BasePtr, 0).getFixed();
847 } else if (TFI->Is64Bit && (MBB.isEHFuncletEntry() || IsEHFuncletEpilogue)) {
848 FIOffset = TFI->getWin64EHFrameIndexRef(MF, FrameIndex, BasePtr);
849 } else {
850 FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed();
851 }
852
853 // LOCAL_ESCAPE uses a single offset, with no register. It only works in the
854 // simple FP case, and doesn't work with stack realignment. On 32-bit, the
855 // offset is from the traditional base pointer location. On 64-bit, the
856 // offset is from the SP at the end of the prologue, not the FP location. This
857 // matches the behavior of llvm.frameaddress.
858 unsigned Opc = MI.getOpcode();
859 if (Opc == TargetOpcode::LOCAL_ESCAPE) {
860 MachineOperand &FI = MI.getOperand(FIOperandNum);
861 FI.ChangeToImmediate(FIOffset);
862 return false;
863 }
864
865 // For LEA64_32r when BasePtr is 32-bits (X32) we can use full-size 64-bit
866 // register as source operand, semantic is the same and destination is
867 // 32-bits. It saves one byte per lea in code since 0x67 prefix is avoided.
868 // Don't change BasePtr since it is used later for stack adjustment.
869 Register MachineBasePtr = BasePtr;
870 if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
871 MachineBasePtr = getX86SubSuperRegister(BasePtr, 64);
872
873 // This must be part of a four operand memory reference. Replace the
874 // FrameIndex with base register. Add an offset to the offset.
875 MI.getOperand(FIOperandNum).ChangeToRegister(MachineBasePtr, false);
876
877 if (BasePtr == StackPtr)
878 FIOffset += SPAdj;
879
880 // The frame index format for stackmaps and patchpoints is different from the
881 // X86 format. It only has a FI and an offset.
882 if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
883 assert(BasePtr == FramePtr && "Expected the FP as base register");
884 int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
885 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
886 return false;
887 }
888
889 if (MI.getOperand(FIOperandNum+3).isImm()) {
890 // Offset is a 32-bit integer.
891 int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
892 int Offset = FIOffset + Imm;
893 assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
894 "Requesting 64-bit offset in 32-bit immediate!");
895 if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
896 MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
897 } else {
898 // Offset is symbolic. This is extremely rare.
899 uint64_t Offset = FIOffset +
900 (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
901 MI.getOperand(FIOperandNum + 3).setOffset(Offset);
902 }
903 return false;
904}
905
// findDeadCallerSavedReg (leading signature line missing from this extract):
// for a return/tail-call at MBBI, finds a tail-call-eligible GPR not used by
// that instruction (and not RIP/RSP/ESP); returns 0 if none is available or
// the function calls eh.return.
908 const MachineFunction *MF = MBB.getParent();
909 if (MF->callsEHReturn())
910 return 0;
911
912 const TargetRegisterClass &AvailableRegs = *getGPRsForTailCall(*MF);
913
914 if (MBBI == MBB.end())
915 return 0;
916
917 switch (MBBI->getOpcode()) {
918 default:
919 return 0;
920 case TargetOpcode::PATCHABLE_RET:
921 case X86::RET:
922 case X86::RET32:
923 case X86::RET64:
924 case X86::RETI32:
925 case X86::RETI64:
926 case X86::TCRETURNdi:
927 case X86::TCRETURNri:
928 case X86::TCRETURNmi:
929 case X86::TCRETURNdi64:
930 case X86::TCRETURNri64:
931 case X86::TCRETURNmi64:
932 case X86::EH_RETURN:
933 case X86::EH_RETURN64: {
// NOTE(review): the declaration of the `Uses` set (line 934, presumably a
// SmallSet of register units/aliases) was dropped by the extraction.
935 for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I) {
936 MachineOperand &MO = MBBI->getOperand(I);
937 if (!MO.isReg() || MO.isDef())
938 continue;
939 Register Reg = MO.getReg();
940 if (!Reg)
941 continue;
942 for (MCRegAliasIterator AI(Reg, this, true); AI.isValid(); ++AI)
943 Uses.insert(*AI);
944 }
945
946 for (auto CS : AvailableRegs)
947 if (!Uses.count(CS) && CS != X86::RIP && CS != X86::RSP && CS != X86::ESP)
948 return CS;
949 }
950 }
951
952 return 0;
953}
954
// getFrameRegister body (signature line missing from this extract): FP when
// the function has a frame pointer, otherwise SP.
956 const X86FrameLowering *TFI = getFrameLowering(MF);
957 return TFI->hasFP(MF) ? FramePtr : StackPtr;
958}
959
960unsigned
// getPtrSizedFrameRegister (signature line missing from this extract):
// frame register narrowed to 32 bits on x32 (64-bit ILP32) targets.
962 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
963 Register FrameReg = getFrameRegister(MF);
964 if (Subtarget.isTarget64BitILP32())
965 FrameReg = getX86SubSuperRegister(FrameReg, 32);
966 return FrameReg;
967}
968
969unsigned
// getPtrSizedStackRegister (signature line missing from this extract):
// stack register narrowed to 32 bits on x32 (64-bit ILP32) targets.
971 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
972 Register StackReg = getStackRegister();
973 if (Subtarget.isTarget64BitILP32())
974 StackReg = getX86SubSuperRegister(StackReg, 32);
975 return StackReg;
976}
977
// static getTileShape (leading signature line missing from this extract):
// resolves the AMX tile shape for VirtReg, consulting the VirtRegMap cache
// first, following COPY chains, and caching the result.
979 const MachineRegisterInfo *MRI) {
980 if (VRM->hasShape(VirtReg))
981 return VRM->getShape(VirtReg);
982
983 const MachineOperand &Def = *MRI->def_begin(VirtReg);
984 MachineInstr *MI = const_cast<MachineInstr *>(Def.getParent());
985 unsigned OpCode = MI->getOpcode();
986 switch (OpCode) {
987 default:
988 llvm_unreachable("Unexpected machine instruction on tile register!");
989 break;
990 case X86::COPY: {
991 Register SrcReg = MI->getOperand(1).getReg();
992 ShapeT Shape = getTileShape(SrcReg, VRM, MRI);
993 VRM->assignVirt2Shape(VirtReg, Shape);
994 return Shape;
995 }
996 // We only collect the tile shape that is defined.
997 case X86::PTILELOADDV:
998 case X86::PTILELOADDT1V:
999 case X86::PTDPBSSDV:
1000 case X86::PTDPBSUDV:
1001 case X86::PTDPBUSDV:
1002 case X86::PTDPBUUDV:
1003 case X86::PTILEZEROV:
1004 case X86::PTDPBF16PSV:
1005 case X86::PTDPFP16PSV:
1006 MachineOperand &MO1 = MI->getOperand(1);
1007 MachineOperand &MO2 = MI->getOperand(2);
1008 ShapeT Shape(&MO1, &MO2, MRI);
1009 VRM->assignVirt2Shape(VirtReg, Shape);
1010 return Shape;
1011 }
1012}
1013
// getRegAllocationHints (leading signature line missing from this extract):
// for AMX tile registers, rebuilds the hint list so only physical registers
// whose assigned tile shape matches VirtReg's shape are suggested; all other
// classes use the base implementation's hints unchanged.
1015 ArrayRef<MCPhysReg> Order,
1017 const MachineFunction &MF,
1018 const VirtRegMap *VRM,
1019 const LiveRegMatrix *Matrix) const {
1020 const MachineRegisterInfo *MRI = &MF.getRegInfo();
1021 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
1022 bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
1023 VirtReg, Order, Hints, MF, VRM, Matrix);
1024
1025 if (RC.getID() != X86::TILERegClassID)
1026 return BaseImplRetVal;
1027
1028 ShapeT VirtShape = getTileShape(VirtReg, const_cast<VirtRegMap *>(VRM), MRI);
1029 auto AddHint = [&](MCPhysReg PhysReg) {
1030 Register VReg = Matrix->getOneVReg(PhysReg);
1031 if (VReg == MCRegister::NoRegister) { // Not allocated yet
1032 Hints.push_back(PhysReg);
1033 return;
1034 }
1035 ShapeT PhysShape = getTileShape(VReg, const_cast<VirtRegMap *>(VRM), MRI);
1036 if (PhysShape == VirtShape)
1037 Hints.push_back(PhysReg);
1038 };
1039
1040 SmallSet<MCPhysReg, 4> CopyHints;
1041 CopyHints.insert(Hints.begin(), Hints.end());
1042 Hints.clear();
1043 for (auto Hint : CopyHints) {
1044 if (RC.contains(Hint) && !MRI->isReserved(Hint))
1045 AddHint(Hint);
1046 }
1047 for (MCPhysReg PhysReg : Order) {
1048 if (!CopyHints.count(PhysReg) && RC.contains(PhysReg) &&
1049 !MRI->isReserved(PhysReg))
1050 AddHint(PhysReg);
1051 }
1052
1053#define DEBUG_TYPE "tile-hint"
1054 LLVM_DEBUG({
1055 dbgs() << "Hints for virtual register " << format_hex(VirtReg, 8) << "\n";
1056 for (auto Hint : Hints) {
1057 dbgs() << "tmm" << Hint << ",";
1058 }
1059 dbgs() << "\n";
1060 });
1061#undef DEBUG_TYPE
1062
1063 return true;
1064}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
static bool isFuncletReturnInstr(const MachineInstr &MI)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator MBBI
SmallPtrSet< MachineInstr *, 2 > Uses
basic Basic Alias true
This file implements the BitVector class.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Live Register Matrix
static cl::opt< bool > EnableBasePointer("m68k-use-base-pointer", cl::Hidden, cl::init(true), cl::desc("Enable use of a base pointer for complex stack frames"))
static bool CantUseSP(const MachineFrameInfo &MFI)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallSet class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:467
static cl::opt< bool > EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true), cl::desc("Enable use of a base pointer for complex stack frames"))
static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II)
static ShapeT getTileShape(Register VirtReg, VirtRegMap *VRM, const MachineRegisterInfo *MRI)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:237
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:644
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
Emit instructions to copy a pair of physical registers.
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
static constexpr unsigned NoRegister
Definition: MCRegister.h:43
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Representation of each machine instruction.
Definition: MachineInstr.h:68
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
Register getReg() const
getReg - Returns the register number.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:164
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:177
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
static StackOffset getFixed(int64_t Fixed)
Definition: TypeSize.h:45
const TargetRegisterClass *const * sc_iterator
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
sc_iterator getSuperClasses() const
Returns a NULL-terminated list of super-classes.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual bool canRealignStack(const MachineFunction &MF) const
True if the stack can be realigned for the target.
virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC, unsigned DefSubReg, const TargetRegisterClass *SrcRC, unsigned SrcSubReg) const
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physica...
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool hasShape(Register virtReg) const
Definition: VirtRegMap.h:116
ShapeT getShape(Register virtReg) const
Definition: VirtRegMap.h:120
void assignVirt2Shape(Register virtReg, ShapeT shape)
Definition: VirtRegMap.h:125
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register.
StackOffset getFrameIndexReferenceSP(const MachineFunction &MF, int FI, Register &SPReg, int Adjustment) const
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - This method should return the base register and offset used to reference a f...
bool Is64Bit
Is64Bit implies that x86_64 instructions are available.
bool Uses64BitFramePtr
True if the 64-bit frame or stack pointer should be used.
int getWin64EHFrameIndexRef(const MachineFunction &MF, int FI, Register &SPReg) const
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
MachineInstr * getStackPtrSaveMI() const
const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const override
getPointerRegClass - Returns a TargetRegisterClass used for pointer values.
unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const
bool hasBasePointer(const MachineFunction &MF) const
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
const TargetRegisterClass * getGPRsForTailCall(const MachineFunction &MF) const
getGPRsForTailCall - Returns a register class with registers that can be used in forming tail calls.
bool canRealignStack(const MachineFunction &MF) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
getReservedRegs - Returns a bitset indexed by physical register number indicating if a register is a ...
Register getFrameRegister(const MachineFunction &MF) const override
unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI) const
findDeadCallerSavedReg - Return a caller-saved register that isn't live when it reaches the "return" ...
const uint32_t * getDarwinTLSCallPreservedMask() const
bool isTileRegisterClass(const TargetRegisterClass *RC) const
Return true if it is tile register class.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
bool isArgumentRegister(const MachineFunction &MF, MCRegister Reg) const override
isArgumentReg - Returns true if Reg can be used as an argument to a function.
Register getStackRegister() const
const TargetRegisterClass * getLargestLegalSuperClass(const TargetRegisterClass *RC, const MachineFunction &MF) const override
const TargetRegisterClass * getMatchingSuperRegClass(const TargetRegisterClass *A, const TargetRegisterClass *B, unsigned Idx) const override
getMatchingSuperRegClass - Return a subclass of the specified register class A so that each register ...
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
unsigned getPtrSizedStackRegister(const MachineFunction &MF) const
int getSEHRegNum(unsigned i) const
bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC, unsigned DefSubReg, const TargetRegisterClass *SrcRC, unsigned SrcSubReg) const override
const TargetRegisterClass * getCrossCopyRegClass(const TargetRegisterClass *RC) const override
getCrossCopyRegClass - Returns a legal register class to copy a register in the specified class to or...
X86RegisterInfo(const Triple &TT)
Register getBaseRegister() const
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
void eliminateFrameIndex(MachineBasicBlock::iterator II, unsigned FIOperandNum, Register BaseReg, int FIOffset) const
const uint32_t * getNoPreservedMask() const override
bool isFixedRegister(const MachineFunction &MF, MCRegister PhysReg) const override
Returns true if PhysReg is a fixed register.
const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const override
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
getCalleeSavedRegs - Return a null-terminated list of all of the callee-save registers on this target...
void adjustStackMapLiveOutMask(uint32_t *Mask) const override
bool hasSSE1() const
Definition: X86Subtarget.h:199
const X86TargetLowering * getTargetLowering() const override
Definition: X86Subtarget.h:124
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
Definition: X86Subtarget.h:179
bool isTarget64BitLP64() const
Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
Definition: X86Subtarget.h:184
bool hasAVX512() const
Definition: X86Subtarget.h:207
bool hasAVX() const
Definition: X86Subtarget.h:205
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ X86_64_SysV
The C convention as specified in the x86-64 supplement to the System V ABI, used on most non-Windows ...
Definition: CallingConv.h:148
@ HiPE
Used by the High-Performance Erlang Compiler (HiPE).
Definition: CallingConv.h:53
@ CFGuard_Check
Special calling convention on Windows for calling the Control Guard Check ICall funtion.
Definition: CallingConv.h:82
@ PreserveMost
Used for runtime calls that preserves most registers.
Definition: CallingConv.h:63
@ AnyReg
Used for dynamic register based calls (e.g.
Definition: CallingConv.h:60
@ CXX_FAST_TLS
Used for access functions.
Definition: CallingConv.h:72
@ X86_INTR
x86 hardware interrupt context.
Definition: CallingConv.h:170
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition: CallingConv.h:50
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition: CallingConv.h:47
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
Definition: CallingConv.h:66
@ Intel_OCL_BI
Used for Intel OpenCL built-ins.
Definition: CallingConv.h:144
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
Definition: CallingConv.h:156
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
Definition: CallingConv.h:87
@ X86_RegCall
Register calling convention used for parameters transfer optimization.
Definition: CallingConv.h:200
void initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:406
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1826
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
FormattedNumber format_hex(uint64_t N, unsigned Width, bool Upper=false)
format_hex - Output N as a fixed width hexadecimal.
Definition: Format.h:186