LLVM 20.0.0git
X86RegisterInfo.cpp
Go to the documentation of this file.
1//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the X86 implementation of the TargetRegisterInfo class.
10// This file is responsible for the frame pointer elimination optimization
11// on X86.
12//
13//===----------------------------------------------------------------------===//
14
15#include "X86RegisterInfo.h"
16#include "X86FrameLowering.h"
18#include "X86Subtarget.h"
19#include "llvm/ADT/BitVector.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
31#include "llvm/IR/Constants.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/Type.h"
34#include "llvm/MC/MCContext.h"
39
40using namespace llvm;
41
42#define GET_REGINFO_TARGET_DESC
43#include "X86GenRegisterInfo.inc"
44
45static cl::opt<bool>
46EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
47 cl::desc("Enable use of a base pointer for complex stack frames"));
48
49static cl::opt<bool>
50 DisableRegAllocNDDHints("x86-disable-regalloc-hints-for-ndd", cl::Hidden,
51 cl::init(false),
52 cl::desc("Disable two address hints for register "
53 "allocation"));
54
56 : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
57 X86_MC::getDwarfRegFlavour(TT, false),
58 X86_MC::getDwarfRegFlavour(TT, true),
59 (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
61
62 // Cache some information.
63 Is64Bit = TT.isArch64Bit();
64 IsWin64 = Is64Bit && TT.isOSWindows();
65
66 // Use a callee-saved register as the base pointer. These registers must
67 // not conflict with any ABI requirements. For example, in 32-bit mode PIC
68 // requires GOT in the EBX register before function calls via PLT GOT pointer.
69 if (Is64Bit) {
70 SlotSize = 8;
71 // This matches the simplified 32-bit pointer code in the data layout
72 // computation.
73 // FIXME: Should use the data layout?
74 bool Use64BitReg = !TT.isX32();
75 StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
76 FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
77 BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
78 } else {
79 SlotSize = 4;
80 StackPtr = X86::ESP;
81 FramePtr = X86::EBP;
82 BasePtr = X86::ESI;
83 }
84}
85
86int
88 return getEncodingValue(i);
89}
90
93 unsigned Idx) const {
94 // The sub_8bit sub-register index is more constrained in 32-bit mode.
95 // It behaves just like the sub_8bit_hi index.
96 if (!Is64Bit && Idx == X86::sub_8bit)
97 Idx = X86::sub_8bit_hi;
98
99 // Forward to TableGen's default version.
100 return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
101}
102
105 const TargetRegisterClass *B,
106 unsigned SubIdx) const {
107 // The sub_8bit sub-register index is more constrained in 32-bit mode.
108 if (!Is64Bit && SubIdx == X86::sub_8bit) {
109 A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
110 if (!A)
111 return nullptr;
112 }
113 return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
114}
115
118 const MachineFunction &MF) const {
119 // Don't allow super-classes of GR8_NOREX. This class is only used after
120 // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
121 // to the full GR8 register class in 64-bit mode, so we cannot allow the
122 // reigster class inflation.
123 //
124 // The GR8_NOREX class is always used in a way that won't be constrained to a
125 // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
126 // full GR8 class.
127 if (RC == &X86::GR8_NOREXRegClass)
128 return RC;
129
130 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
131
132 const TargetRegisterClass *Super = RC;
134 do {
135 switch (Super->getID()) {
136 case X86::FR32RegClassID:
137 case X86::FR64RegClassID:
138 // If AVX-512 isn't supported we should only inflate to these classes.
139 if (!Subtarget.hasAVX512() &&
140 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
141 return Super;
142 break;
143 case X86::VR128RegClassID:
144 case X86::VR256RegClassID:
145 // If VLX isn't supported we should only inflate to these classes.
146 if (!Subtarget.hasVLX() &&
147 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
148 return Super;
149 break;
150 case X86::VR128XRegClassID:
151 case X86::VR256XRegClassID:
152 // If VLX isn't support we shouldn't inflate to these classes.
153 if (Subtarget.hasVLX() &&
154 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
155 return Super;
156 break;
157 case X86::FR32XRegClassID:
158 case X86::FR64XRegClassID:
159 // If AVX-512 isn't support we shouldn't inflate to these classes.
160 if (Subtarget.hasAVX512() &&
161 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
162 return Super;
163 break;
164 case X86::GR8RegClassID:
165 case X86::GR16RegClassID:
166 case X86::GR32RegClassID:
167 case X86::GR64RegClassID:
168 case X86::GR8_NOREX2RegClassID:
169 case X86::GR16_NOREX2RegClassID:
170 case X86::GR32_NOREX2RegClassID:
171 case X86::GR64_NOREX2RegClassID:
172 case X86::RFP32RegClassID:
173 case X86::RFP64RegClassID:
174 case X86::RFP80RegClassID:
175 case X86::VR512_0_15RegClassID:
176 case X86::VR512RegClassID:
177 // Don't return a super-class that would shrink the spill size.
178 // That can happen with the vector and float classes.
179 if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
180 return Super;
181 }
182 Super = *I++;
183 } while (Super);
184 return RC;
185}
186
189 unsigned Kind) const {
190 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
191 switch (Kind) {
192 default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
193 case 0: // Normal GPRs.
194 if (Subtarget.isTarget64BitLP64())
195 return &X86::GR64RegClass;
196 // If the target is 64bit but we have been told to use 32bit addresses,
197 // we can still use 64-bit register as long as we know the high bits
198 // are zeros.
199 // Reflect that in the returned register class.
200 if (Is64Bit) {
201 // When the target also allows 64-bit frame pointer and we do have a
202 // frame, this is fine to use it for the address accesses as well.
203 const X86FrameLowering *TFI = getFrameLowering(MF);
204 return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
205 ? &X86::LOW32_ADDR_ACCESS_RBPRegClass
206 : &X86::LOW32_ADDR_ACCESSRegClass;
207 }
208 return &X86::GR32RegClass;
209 case 1: // Normal GPRs except the stack pointer (for encoding reasons).
210 if (Subtarget.isTarget64BitLP64())
211 return &X86::GR64_NOSPRegClass;
212 // NOSP does not contain RIP, so no special case here.
213 return &X86::GR32_NOSPRegClass;
214 case 2: // NOREX GPRs.
215 if (Subtarget.isTarget64BitLP64())
216 return &X86::GR64_NOREXRegClass;
217 return &X86::GR32_NOREXRegClass;
218 case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
219 if (Subtarget.isTarget64BitLP64())
220 return &X86::GR64_NOREX_NOSPRegClass;
221 // NOSP does not contain RIP, so no special case here.
222 return &X86::GR32_NOREX_NOSPRegClass;
223 case 4: // Available for tailcall (not callee-saved GPRs).
224 return getGPRsForTailCall(MF);
225 }
226}
227
229 unsigned DefSubReg,
230 const TargetRegisterClass *SrcRC,
231 unsigned SrcSubReg) const {
232 // Prevent rewriting a copy where the destination size is larger than the
233 // input size. See PR41619.
234 // FIXME: Should this be factored into the base implementation somehow.
235 if (DefRC->hasSuperClassEq(&X86::GR64RegClass) && DefSubReg == 0 &&
236 SrcRC->hasSuperClassEq(&X86::GR64RegClass) && SrcSubReg == X86::sub_32bit)
237 return false;
238
239 return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
240 SrcRC, SrcSubReg);
241}
242
245 const Function &F = MF.getFunction();
246 if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
247 return &X86::GR64_TCW64RegClass;
248 else if (Is64Bit)
249 return &X86::GR64_TCRegClass;
250
251 bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
252 if (hasHipeCC)
253 return &X86::GR32RegClass;
254 return &X86::GR32_TCRegClass;
255}
256
259 if (RC == &X86::CCRRegClass) {
260 if (Is64Bit)
261 return &X86::GR64RegClass;
262 else
263 return &X86::GR32RegClass;
264 }
265 return RC;
266}
267
268unsigned
270 MachineFunction &MF) const {
271 const X86FrameLowering *TFI = getFrameLowering(MF);
272
273 unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
274 switch (RC->getID()) {
275 default:
276 return 0;
277 case X86::GR32RegClassID:
278 return 4 - FPDiff;
279 case X86::GR64RegClassID:
280 return 12 - FPDiff;
281 case X86::VR128RegClassID:
282 return Is64Bit ? 10 : 4;
283 case X86::VR64RegClassID:
284 return 4;
285 }
286}
287
288const MCPhysReg *
290 assert(MF && "MachineFunction required");
291
292 const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
293 const Function &F = MF->getFunction();
294 bool HasSSE = Subtarget.hasSSE1();
295 bool HasAVX = Subtarget.hasAVX();
296 bool HasAVX512 = Subtarget.hasAVX512();
297 bool CallsEHReturn = MF->callsEHReturn();
298
299 CallingConv::ID CC = F.getCallingConv();
300
301 // If attribute NoCallerSavedRegisters exists then we set X86_INTR calling
302 // convention because it has the CSR list.
303 if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
305
306 // If atribute specified, override the CSRs normally specified by the
307 // calling convention and use the empty set instead.
308 if (MF->getFunction().hasFnAttribute("no_callee_saved_registers"))
309 return CSR_NoRegs_SaveList;
310
311 switch (CC) {
312 case CallingConv::GHC:
314 return CSR_NoRegs_SaveList;
316 if (HasAVX)
317 return CSR_64_AllRegs_AVX_SaveList;
318 return CSR_64_AllRegs_SaveList;
320 return IsWin64 ? CSR_Win64_RT_MostRegs_SaveList
321 : CSR_64_RT_MostRegs_SaveList;
323 if (HasAVX)
324 return CSR_64_RT_AllRegs_AVX_SaveList;
325 return CSR_64_RT_AllRegs_SaveList;
327 return CSR_64_NoneRegs_SaveList;
329 if (Is64Bit)
330 return MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR() ?
331 CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;
332 break;
334 if (HasAVX512 && IsWin64)
335 return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
336 if (HasAVX512 && Is64Bit)
337 return CSR_64_Intel_OCL_BI_AVX512_SaveList;
338 if (HasAVX && IsWin64)
339 return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
340 if (HasAVX && Is64Bit)
341 return CSR_64_Intel_OCL_BI_AVX_SaveList;
342 if (!HasAVX && !IsWin64 && Is64Bit)
343 return CSR_64_Intel_OCL_BI_SaveList;
344 break;
345 }
347 if (Is64Bit) {
348 if (IsWin64) {
349 return (HasSSE ? CSR_Win64_RegCall_SaveList :
350 CSR_Win64_RegCall_NoSSE_SaveList);
351 } else {
352 return (HasSSE ? CSR_SysV64_RegCall_SaveList :
353 CSR_SysV64_RegCall_NoSSE_SaveList);
354 }
355 } else {
356 return (HasSSE ? CSR_32_RegCall_SaveList :
357 CSR_32_RegCall_NoSSE_SaveList);
358 }
360 assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
361 return (HasSSE ? CSR_Win32_CFGuard_Check_SaveList
362 : CSR_Win32_CFGuard_Check_NoSSE_SaveList);
364 if (Is64Bit)
365 return CSR_64_MostRegs_SaveList;
366 break;
368 if (!HasSSE)
369 return CSR_Win64_NoSSE_SaveList;
370 return CSR_Win64_SaveList;
372 if (!Is64Bit)
373 return CSR_32_SaveList;
374 return IsWin64 ? CSR_Win64_SwiftTail_SaveList : CSR_64_SwiftTail_SaveList;
376 if (CallsEHReturn)
377 return CSR_64EHRet_SaveList;
378 return CSR_64_SaveList;
380 if (Is64Bit) {
381 if (HasAVX512)
382 return CSR_64_AllRegs_AVX512_SaveList;
383 if (HasAVX)
384 return CSR_64_AllRegs_AVX_SaveList;
385 if (HasSSE)
386 return CSR_64_AllRegs_SaveList;
387 return CSR_64_AllRegs_NoSSE_SaveList;
388 } else {
389 if (HasAVX512)
390 return CSR_32_AllRegs_AVX512_SaveList;
391 if (HasAVX)
392 return CSR_32_AllRegs_AVX_SaveList;
393 if (HasSSE)
394 return CSR_32_AllRegs_SSE_SaveList;
395 return CSR_32_AllRegs_SaveList;
396 }
397 default:
398 break;
399 }
400
401 if (Is64Bit) {
402 bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
403 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
404 if (IsSwiftCC)
405 return IsWin64 ? CSR_Win64_SwiftError_SaveList
406 : CSR_64_SwiftError_SaveList;
407
408 if (IsWin64)
409 return HasSSE ? CSR_Win64_SaveList : CSR_Win64_NoSSE_SaveList;
410 if (CallsEHReturn)
411 return CSR_64EHRet_SaveList;
412 return CSR_64_SaveList;
413 }
414
415 return CallsEHReturn ? CSR_32EHRet_SaveList : CSR_32_SaveList;
416}
417
419 const MachineFunction *MF) const {
420 assert(MF && "Invalid MachineFunction pointer.");
423 return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
424 return nullptr;
425}
426
427const uint32_t *
429 CallingConv::ID CC) const {
430 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
431 bool HasSSE = Subtarget.hasSSE1();
432 bool HasAVX = Subtarget.hasAVX();
433 bool HasAVX512 = Subtarget.hasAVX512();
434
435 switch (CC) {
436 case CallingConv::GHC:
438 return CSR_NoRegs_RegMask;
440 if (HasAVX)
441 return CSR_64_AllRegs_AVX_RegMask;
442 return CSR_64_AllRegs_RegMask;
444 return IsWin64 ? CSR_Win64_RT_MostRegs_RegMask : CSR_64_RT_MostRegs_RegMask;
446 if (HasAVX)
447 return CSR_64_RT_AllRegs_AVX_RegMask;
448 return CSR_64_RT_AllRegs_RegMask;
450 return CSR_64_NoneRegs_RegMask;
452 if (Is64Bit)
453 return CSR_64_TLS_Darwin_RegMask;
454 break;
456 if (HasAVX512 && IsWin64)
457 return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
458 if (HasAVX512 && Is64Bit)
459 return CSR_64_Intel_OCL_BI_AVX512_RegMask;
460 if (HasAVX && IsWin64)
461 return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
462 if (HasAVX && Is64Bit)
463 return CSR_64_Intel_OCL_BI_AVX_RegMask;
464 if (!HasAVX && !IsWin64 && Is64Bit)
465 return CSR_64_Intel_OCL_BI_RegMask;
466 break;
467 }
469 if (Is64Bit) {
470 if (IsWin64) {
471 return (HasSSE ? CSR_Win64_RegCall_RegMask :
472 CSR_Win64_RegCall_NoSSE_RegMask);
473 } else {
474 return (HasSSE ? CSR_SysV64_RegCall_RegMask :
475 CSR_SysV64_RegCall_NoSSE_RegMask);
476 }
477 } else {
478 return (HasSSE ? CSR_32_RegCall_RegMask :
479 CSR_32_RegCall_NoSSE_RegMask);
480 }
482 assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
483 return (HasSSE ? CSR_Win32_CFGuard_Check_RegMask
484 : CSR_Win32_CFGuard_Check_NoSSE_RegMask);
486 if (Is64Bit)
487 return CSR_64_MostRegs_RegMask;
488 break;
490 return CSR_Win64_RegMask;
492 if (!Is64Bit)
493 return CSR_32_RegMask;
494 return IsWin64 ? CSR_Win64_SwiftTail_RegMask : CSR_64_SwiftTail_RegMask;
496 return CSR_64_RegMask;
498 if (Is64Bit) {
499 if (HasAVX512)
500 return CSR_64_AllRegs_AVX512_RegMask;
501 if (HasAVX)
502 return CSR_64_AllRegs_AVX_RegMask;
503 if (HasSSE)
504 return CSR_64_AllRegs_RegMask;
505 return CSR_64_AllRegs_NoSSE_RegMask;
506 } else {
507 if (HasAVX512)
508 return CSR_32_AllRegs_AVX512_RegMask;
509 if (HasAVX)
510 return CSR_32_AllRegs_AVX_RegMask;
511 if (HasSSE)
512 return CSR_32_AllRegs_SSE_RegMask;
513 return CSR_32_AllRegs_RegMask;
514 }
515 default:
516 break;
517 }
518
519 // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
520 // callsEHReturn().
521 if (Is64Bit) {
522 const Function &F = MF.getFunction();
523 bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
524 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
525 if (IsSwiftCC)
526 return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;
527
528 return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
529 }
530
531 return CSR_32_RegMask;
532}
533
534const uint32_t*
536 return CSR_NoRegs_RegMask;
537}
538
540 return CSR_64_TLS_Darwin_RegMask;
541}
542
544 BitVector Reserved(getNumRegs());
545 const X86FrameLowering *TFI = getFrameLowering(MF);
546
547 // Set the floating point control register as reserved.
548 Reserved.set(X86::FPCW);
549
550 // Set the floating point status register as reserved.
551 Reserved.set(X86::FPSW);
552
553 // Set the SIMD floating point control register as reserved.
554 Reserved.set(X86::MXCSR);
555
556 // Set the stack-pointer register and its aliases as reserved.
557 for (const MCPhysReg &SubReg : subregs_inclusive(X86::RSP))
558 Reserved.set(SubReg);
559
560 // Set the Shadow Stack Pointer as reserved.
561 Reserved.set(X86::SSP);
562
563 // Set the instruction pointer register and its aliases as reserved.
564 for (const MCPhysReg &SubReg : subregs_inclusive(X86::RIP))
565 Reserved.set(SubReg);
566
567 // Set the frame-pointer register and its aliases as reserved if needed.
568 if (TFI->hasFP(MF)) {
571 SMLoc(),
572 "Frame pointer clobbered by function invoke is not supported.");
573
574 for (const MCPhysReg &SubReg : subregs_inclusive(X86::RBP))
575 Reserved.set(SubReg);
576 }
577
578 // Set the base-pointer register and its aliases as reserved if needed.
579 if (hasBasePointer(MF)) {
582 "Stack realignment in presence of dynamic "
583 "allocas is not supported with "
584 "this calling convention.");
585
587 for (const MCPhysReg &SubReg : subregs_inclusive(BasePtr))
588 Reserved.set(SubReg);
589 }
590
591 // Mark the segment registers as reserved.
592 Reserved.set(X86::CS);
593 Reserved.set(X86::SS);
594 Reserved.set(X86::DS);
595 Reserved.set(X86::ES);
596 Reserved.set(X86::FS);
597 Reserved.set(X86::GS);
598
599 // Mark the floating point stack registers as reserved.
600 for (unsigned n = 0; n != 8; ++n)
601 Reserved.set(X86::ST0 + n);
602
603 // Reserve the registers that only exist in 64-bit mode.
604 if (!Is64Bit) {
605 // These 8-bit registers are part of the x86-64 extension even though their
606 // super-registers are old 32-bits.
607 Reserved.set(X86::SIL);
608 Reserved.set(X86::DIL);
609 Reserved.set(X86::BPL);
610 Reserved.set(X86::SPL);
611 Reserved.set(X86::SIH);
612 Reserved.set(X86::DIH);
613 Reserved.set(X86::BPH);
614 Reserved.set(X86::SPH);
615
616 for (unsigned n = 0; n != 8; ++n) {
617 // R8, R9, ...
618 for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
619 Reserved.set(*AI);
620
621 // XMM8, XMM9, ...
622 for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
623 Reserved.set(*AI);
624 }
625 }
626 if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
627 for (unsigned n = 0; n != 16; ++n) {
628 for (MCRegAliasIterator AI(X86::XMM16 + n, this, true); AI.isValid();
629 ++AI)
630 Reserved.set(*AI);
631 }
632 }
633
634 // Reserve the extended general purpose registers.
635 if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasEGPR())
636 Reserved.set(X86::R16, X86::R31WH + 1);
637
639 for (MCRegAliasIterator AI(X86::R14, this, true); AI.isValid(); ++AI)
640 Reserved.set(*AI);
641 for (MCRegAliasIterator AI(X86::R15, this, true); AI.isValid(); ++AI)
642 Reserved.set(*AI);
643 }
644
645 assert(checkAllSuperRegsMarked(Reserved,
646 {X86::SIL, X86::DIL, X86::BPL, X86::SPL,
647 X86::SIH, X86::DIH, X86::BPH, X86::SPH}));
648 return Reserved;
649}
650
652 // All existing Intel CPUs that support AMX support AVX512 and all existing
653 // Intel CPUs that support APX support AMX. AVX512 implies AVX.
654 //
655 // We enumerate the registers in X86GenRegisterInfo.inc in this order:
656 //
657 // Registers before AVX512,
658 // AVX512 registers (X/YMM16-31, ZMM0-31, K registers)
659 // AMX registers (TMM)
660 // APX registers (R16-R31)
661 //
662 // and try to return the minimum number of registers supported by the target.
663 static_assert((X86::R15WH + 1 == X86::YMM0) && (X86::YMM15 + 1 == X86::K0) &&
664 (X86::K6_K7 + 1 == X86::TMMCFG) &&
665 (X86::TMM7 + 1 == X86::R16) &&
666 (X86::R31WH + 1 == X86::NUM_TARGET_REGS),
667 "Register number may be incorrect");
668
669 const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
670 if (ST.hasEGPR())
671 return X86::NUM_TARGET_REGS;
672 if (ST.hasAMXTILE())
673 return X86::TMM7 + 1;
674 if (ST.hasAVX512())
675 return X86::K6_K7 + 1;
676 if (ST.hasAVX())
677 return X86::YMM15 + 1;
678 return X86::R15WH + 1;
679}
680
682 MCRegister Reg) const {
683 const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
684 const TargetRegisterInfo &TRI = *ST.getRegisterInfo();
685 auto IsSubReg = [&](MCRegister RegA, MCRegister RegB) {
686 return TRI.isSuperOrSubRegisterEq(RegA, RegB);
687 };
688
689 if (!ST.is64Bit())
690 return llvm::any_of(
691 SmallVector<MCRegister>{X86::EAX, X86::ECX, X86::EDX},
692 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }) ||
693 (ST.hasMMX() && X86::VR64RegClass.contains(Reg));
694
696
697 if (CC == CallingConv::X86_64_SysV && IsSubReg(X86::RAX, Reg))
698 return true;
699
700 if (llvm::any_of(
701 SmallVector<MCRegister>{X86::RDX, X86::RCX, X86::R8, X86::R9},
702 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
703 return true;
704
705 if (CC != CallingConv::Win64 &&
706 llvm::any_of(SmallVector<MCRegister>{X86::RDI, X86::RSI},
707 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
708 return true;
709
710 if (ST.hasSSE1() &&
711 llvm::any_of(SmallVector<MCRegister>{X86::XMM0, X86::XMM1, X86::XMM2,
712 X86::XMM3, X86::XMM4, X86::XMM5,
713 X86::XMM6, X86::XMM7},
714 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
715 return true;
716
717 return X86GenRegisterInfo::isArgumentRegister(MF, Reg);
718}
719
721 MCRegister PhysReg) const {
722 const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
723 const TargetRegisterInfo &TRI = *ST.getRegisterInfo();
724
725 // Stack pointer.
726 if (TRI.isSuperOrSubRegisterEq(X86::RSP, PhysReg))
727 return true;
728
729 // Don't use the frame pointer if it's being used.
730 const X86FrameLowering &TFI = *getFrameLowering(MF);
731 if (TFI.hasFP(MF) && TRI.isSuperOrSubRegisterEq(X86::RBP, PhysReg))
732 return true;
733
734 return X86GenRegisterInfo::isFixedRegister(MF, PhysReg);
735}
736
738 return RC->getID() == X86::TILERegClassID;
739}
740
742 // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
743 // because the calling convention defines the EFLAGS register as NOT
744 // preserved.
745 //
746 // Unfortunatelly the EFLAGS show up as live-out after branch folding. Adding
747 // an assert to track this and clear the register afterwards to avoid
748 // unnecessary crashes during release builds.
749 assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
750 "EFLAGS are not live-out from a patchpoint.");
751
752 // Also clean other registers that don't need preserving (IP).
753 for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
754 Mask[Reg / 32] &= ~(1U << (Reg % 32));
755}
756
757//===----------------------------------------------------------------------===//
758// Stack Frame Processing methods
759//===----------------------------------------------------------------------===//
760
761static bool CantUseSP(const MachineFrameInfo &MFI) {
762 return MFI.hasVarSizedObjects() || MFI.hasOpaqueSPAdjustment();
763}
764
767 // We have a virtual register to reference argument, and don't need base
768 // pointer.
769 if (X86FI->getStackPtrSaveMI() != nullptr)
770 return false;
771
772 if (X86FI->hasPreallocatedCall())
773 return true;
774
775 const MachineFrameInfo &MFI = MF.getFrameInfo();
776
778 return false;
779
780 // When we need stack realignment, we can't address the stack from the frame
781 // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
782 // can't address variables from the stack pointer. MS inline asm can
783 // reference locals while also adjusting the stack pointer. When we can't
784 // use both the SP and the FP, we need a separate base pointer register.
785 bool CantUseFP = hasStackRealignment(MF);
786 return CantUseFP && CantUseSP(MFI);
787}
788
791 return false;
792
793 const MachineFrameInfo &MFI = MF.getFrameInfo();
794 const MachineRegisterInfo *MRI = &MF.getRegInfo();
795
796 // Stack realignment requires a frame pointer. If we already started
797 // register allocation with frame pointer elimination, it is too late now.
798 if (!MRI->canReserveReg(FramePtr))
799 return false;
800
801 // If a base pointer is necessary. Check that it isn't too late to reserve
802 // it.
803 if (CantUseSP(MFI))
804 return MRI->canReserveReg(BasePtr);
805 return true;
806}
807
810 return true;
811
812 return !Is64Bit && MF.getFunction().getCallingConv() == CallingConv::X86_INTR;
813}
814
815// tryOptimizeLEAtoMOV - helper function that tries to replace a LEA instruction
816// of the form 'lea (%esp), %ebx' --> 'mov %esp, %ebx'.
817// TODO: In this case we should be really trying first to entirely eliminate
818// this instruction which is a plain copy.
820 MachineInstr &MI = *II;
821 unsigned Opc = II->getOpcode();
822 // Check if this is a LEA of the form 'lea (%esp), %ebx'
823 if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||
824 MI.getOperand(2).getImm() != 1 ||
825 MI.getOperand(3).getReg() != X86::NoRegister ||
826 MI.getOperand(4).getImm() != 0 ||
827 MI.getOperand(5).getReg() != X86::NoRegister)
828 return false;
829 Register BasePtr = MI.getOperand(1).getReg();
830 // In X32 mode, ensure the base-pointer is a 32-bit operand, so the LEA will
831 // be replaced with a 32-bit operand MOV which will zero extend the upper
832 // 32-bits of the super register.
833 if (Opc == X86::LEA64_32r)
834 BasePtr = getX86SubSuperRegister(BasePtr, 32);
835 Register NewDestReg = MI.getOperand(0).getReg();
836 const X86InstrInfo *TII =
837 MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
838 TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
839 MI.getOperand(1).isKill());
840 MI.eraseFromParent();
841 return true;
842}
843
845 switch (MI.getOpcode()) {
846 case X86::CATCHRET:
847 case X86::CLEANUPRET:
848 return true;
849 default:
850 return false;
851 }
852 llvm_unreachable("impossible");
853}
854
856 unsigned FIOperandNum,
857 Register BaseReg,
858 int FIOffset) const {
859 MachineInstr &MI = *II;
860 unsigned Opc = MI.getOpcode();
861 if (Opc == TargetOpcode::LOCAL_ESCAPE) {
862 MachineOperand &FI = MI.getOperand(FIOperandNum);
863 FI.ChangeToImmediate(FIOffset);
864 return;
865 }
866
867 MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
868
869 // The frame index format for stackmaps and patchpoints is different from the
870 // X86 format. It only has a FI and an offset.
871 if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
872 assert(BasePtr == FramePtr && "Expected the FP as base register");
873 int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
874 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
875 return;
876 }
877
878 if (MI.getOperand(FIOperandNum + 3).isImm()) {
879 // Offset is a 32-bit integer.
880 int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
881 int Offset = FIOffset + Imm;
882 assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
883 "Requesting 64-bit offset in 32-bit immediate!");
884 if (Offset != 0)
885 MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
886 } else {
887 // Offset is symbolic. This is extremely rare.
889 FIOffset + (uint64_t)MI.getOperand(FIOperandNum + 3).getOffset();
890 MI.getOperand(FIOperandNum + 3).setOffset(Offset);
891 }
892}
893
894bool
896 int SPAdj, unsigned FIOperandNum,
897 RegScavenger *RS) const {
898 MachineInstr &MI = *II;
899 MachineBasicBlock &MBB = *MI.getParent();
902 bool IsEHFuncletEpilogue = MBBI == MBB.end() ? false
904 const X86FrameLowering *TFI = getFrameLowering(MF);
905 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
906
907 // Determine base register and offset.
908 int FIOffset;
909 Register BasePtr;
910 if (MI.isReturn()) {
911 assert((!hasStackRealignment(MF) ||
912 MF.getFrameInfo().isFixedObjectIndex(FrameIndex)) &&
913 "Return instruction can only reference SP relative frame objects");
914 FIOffset =
915 TFI->getFrameIndexReferenceSP(MF, FrameIndex, BasePtr, 0).getFixed();
916 } else if (TFI->Is64Bit && (MBB.isEHFuncletEntry() || IsEHFuncletEpilogue)) {
917 FIOffset = TFI->getWin64EHFrameIndexRef(MF, FrameIndex, BasePtr);
918 } else {
919 FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed();
920 }
921
922 // LOCAL_ESCAPE uses a single offset, with no register. It only works in the
923 // simple FP case, and doesn't work with stack realignment. On 32-bit, the
924 // offset is from the traditional base pointer location. On 64-bit, the
925 // offset is from the SP at the end of the prologue, not the FP location. This
926 // matches the behavior of llvm.frameaddress.
927 unsigned Opc = MI.getOpcode();
928 if (Opc == TargetOpcode::LOCAL_ESCAPE) {
929 MachineOperand &FI = MI.getOperand(FIOperandNum);
930 FI.ChangeToImmediate(FIOffset);
931 return false;
932 }
933
934 // For LEA64_32r when BasePtr is 32-bits (X32) we can use full-size 64-bit
935 // register as source operand, semantic is the same and destination is
936 // 32-bits. It saves one byte per lea in code since 0x67 prefix is avoided.
937 // Don't change BasePtr since it is used later for stack adjustment.
938 Register MachineBasePtr = BasePtr;
939 if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
940 MachineBasePtr = getX86SubSuperRegister(BasePtr, 64);
941
942 // This must be part of a four operand memory reference. Replace the
943 // FrameIndex with base register. Add an offset to the offset.
944 MI.getOperand(FIOperandNum).ChangeToRegister(MachineBasePtr, false);
945
946 if (BasePtr == StackPtr)
947 FIOffset += SPAdj;
948
949 // The frame index format for stackmaps and patchpoints is different from the
950 // X86 format. It only has a FI and an offset.
951 if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
952 assert(BasePtr == FramePtr && "Expected the FP as base register");
953 int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
954 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
955 return false;
956 }
957
958 if (MI.getOperand(FIOperandNum+3).isImm()) {
959 // Offset is a 32-bit integer.
960 int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
961 int Offset = FIOffset + Imm;
962 assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
963 "Requesting 64-bit offset in 32-bit immediate!");
964 if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
965 MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
966 } else {
967 // Offset is symbolic. This is extremely rare.
968 uint64_t Offset = FIOffset +
969 (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
970 MI.getOperand(FIOperandNum + 3).setOffset(Offset);
971 }
972 return false;
973}
974
977 const MachineFunction *MF = MBB.getParent();
978 if (MF->callsEHReturn())
979 return 0;
980
981 const TargetRegisterClass &AvailableRegs = *getGPRsForTailCall(*MF);
982
983 if (MBBI == MBB.end())
984 return 0;
985
986 switch (MBBI->getOpcode()) {
987 default:
988 return 0;
989 case TargetOpcode::PATCHABLE_RET:
990 case X86::RET:
991 case X86::RET32:
992 case X86::RET64:
993 case X86::RETI32:
994 case X86::RETI64:
995 case X86::TCRETURNdi:
996 case X86::TCRETURNri:
997 case X86::TCRETURNmi:
998 case X86::TCRETURNdi64:
999 case X86::TCRETURNri64:
1000 case X86::TCRETURNmi64:
1001 case X86::EH_RETURN:
1002 case X86::EH_RETURN64: {
1004 for (MachineOperand &MO : MBBI->operands()) {
1005 if (!MO.isReg() || MO.isDef())
1006 continue;
1007 Register Reg = MO.getReg();
1008 if (!Reg)
1009 continue;
1010 for (MCRegAliasIterator AI(Reg, this, true); AI.isValid(); ++AI)
1011 Uses.insert(*AI);
1012 }
1013
1014 for (auto CS : AvailableRegs)
1015 if (!Uses.count(CS) && CS != X86::RIP && CS != X86::RSP && CS != X86::ESP)
1016 return CS;
1017 }
1018 }
1019
1020 return 0;
1021}
1022
1024 const X86FrameLowering *TFI = getFrameLowering(MF);
1025 return TFI->hasFP(MF) ? FramePtr : StackPtr;
1026}
1027
1028unsigned
1030 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
1031 Register FrameReg = getFrameRegister(MF);
1032 if (Subtarget.isTarget64BitILP32())
1033 FrameReg = getX86SubSuperRegister(FrameReg, 32);
1034 return FrameReg;
1035}
1036
1037unsigned
1039 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
1040 Register StackReg = getStackRegister();
1041 if (Subtarget.isTarget64BitILP32())
1042 StackReg = getX86SubSuperRegister(StackReg, 32);
1043 return StackReg;
1044}
1045
1047 const MachineRegisterInfo *MRI) {
1048 if (VRM->hasShape(VirtReg))
1049 return VRM->getShape(VirtReg);
1050
1051 const MachineOperand &Def = *MRI->def_begin(VirtReg);
1052 MachineInstr *MI = const_cast<MachineInstr *>(Def.getParent());
1053 unsigned OpCode = MI->getOpcode();
1054 switch (OpCode) {
1055 default:
1056 llvm_unreachable("Unexpected machine instruction on tile register!");
1057 break;
1058 case X86::COPY: {
1059 Register SrcReg = MI->getOperand(1).getReg();
1060 ShapeT Shape = getTileShape(SrcReg, VRM, MRI);
1061 VRM->assignVirt2Shape(VirtReg, Shape);
1062 return Shape;
1063 }
1064 // We only collect the tile shape that is defined.
1065 case X86::PTILELOADDV:
1066 case X86::PTILELOADDT1V:
1067 case X86::PTDPBSSDV:
1068 case X86::PTDPBSUDV:
1069 case X86::PTDPBUSDV:
1070 case X86::PTDPBUUDV:
1071 case X86::PTILEZEROV:
1072 case X86::PTDPBF16PSV:
1073 case X86::PTDPFP16PSV:
1074 case X86::PTCMMIMFP16PSV:
1075 case X86::PTCMMRLFP16PSV:
1076 MachineOperand &MO1 = MI->getOperand(1);
1077 MachineOperand &MO2 = MI->getOperand(2);
1078 ShapeT Shape(&MO1, &MO2, MRI);
1079 VRM->assignVirt2Shape(VirtReg, Shape);
1080 return Shape;
1081 }
1082}
1083
1085 ArrayRef<MCPhysReg> Order,
1087 const MachineFunction &MF,
1088 const VirtRegMap *VRM,
1089 const LiveRegMatrix *Matrix) const {
1090 const MachineRegisterInfo *MRI = &MF.getRegInfo();
1091 const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
1092 bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
1093 VirtReg, Order, Hints, MF, VRM, Matrix);
1094 const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
1095 const TargetRegisterInfo &TRI = *ST.getRegisterInfo();
1096
1097 unsigned ID = RC.getID();
1098
1099 if (!VRM)
1100 return BaseImplRetVal;
1101
1102 if (ID != X86::TILERegClassID) {
1103 if (DisableRegAllocNDDHints || !ST.hasNDD() ||
1104 !TRI.isGeneralPurposeRegisterClass(&RC))
1105 return BaseImplRetVal;
1106
1107 // Add any two address hints after any copy hints.
1108 SmallSet<unsigned, 4> TwoAddrHints;
1109
1110 auto TryAddNDDHint = [&](const MachineOperand &MO) {
1111 Register Reg = MO.getReg();
1112 Register PhysReg =
1113 Register::isPhysicalRegister(Reg) ? Reg : Register(VRM->getPhys(Reg));
1114 if (PhysReg && !MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
1115 TwoAddrHints.insert(PhysReg);
1116 };
1117
1118 // NDD instructions is compressible when Op0 is allocated to the same
1119 // physic register as Op1 (or Op2 if it's commutable).
1120 for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
1121 const MachineInstr &MI = *MO.getParent();
1122 if (!X86::getNonNDVariant(MI.getOpcode()))
1123 continue;
1124 unsigned OpIdx = MI.getOperandNo(&MO);
1125 if (OpIdx == 0) {
1126 assert(MI.getOperand(1).isReg());
1127 TryAddNDDHint(MI.getOperand(1));
1128 if (MI.isCommutable()) {
1129 assert(MI.getOperand(2).isReg());
1130 TryAddNDDHint(MI.getOperand(2));
1131 }
1132 } else if (OpIdx == 1) {
1133 TryAddNDDHint(MI.getOperand(0));
1134 } else if (MI.isCommutable() && OpIdx == 2) {
1135 TryAddNDDHint(MI.getOperand(0));
1136 }
1137 }
1138
1139 for (MCPhysReg OrderReg : Order)
1140 if (TwoAddrHints.count(OrderReg))
1141 Hints.push_back(OrderReg);
1142
1143 return BaseImplRetVal;
1144 }
1145
1146 ShapeT VirtShape = getTileShape(VirtReg, const_cast<VirtRegMap *>(VRM), MRI);
1147 auto AddHint = [&](MCPhysReg PhysReg) {
1148 Register VReg = Matrix->getOneVReg(PhysReg);
1149 if (VReg == MCRegister::NoRegister) { // Not allocated yet
1150 Hints.push_back(PhysReg);
1151 return;
1152 }
1153 ShapeT PhysShape = getTileShape(VReg, const_cast<VirtRegMap *>(VRM), MRI);
1154 if (PhysShape == VirtShape)
1155 Hints.push_back(PhysReg);
1156 };
1157
1158 SmallSet<MCPhysReg, 4> CopyHints;
1159 CopyHints.insert(Hints.begin(), Hints.end());
1160 Hints.clear();
1161 for (auto Hint : CopyHints) {
1162 if (RC.contains(Hint) && !MRI->isReserved(Hint))
1163 AddHint(Hint);
1164 }
1165 for (MCPhysReg PhysReg : Order) {
1166 if (!CopyHints.count(PhysReg) && RC.contains(PhysReg) &&
1167 !MRI->isReserved(PhysReg))
1168 AddHint(PhysReg);
1169 }
1170
1171#define DEBUG_TYPE "tile-hint"
1172 LLVM_DEBUG({
1173 dbgs() << "Hints for virtual register " << format_hex(VirtReg, 8) << "\n";
1174 for (auto Hint : Hints) {
1175 dbgs() << "tmm" << Hint << ",";
1176 }
1177 dbgs() << "\n";
1178 });
1179#undef DEBUG_TYPE
1180
1181 return true;
1182}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
static bool isFuncletReturnInstr(const MachineInstr &MI)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator MBBI
basic Basic Alias true
This file implements the BitVector class.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different flavors of constant values that live in LLVM.
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
Rewrite Partial Register Uses
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Live Register Matrix
static cl::opt< bool > EnableBasePointer("m68k-use-base-pointer", cl::Hidden, cl::init(true), cl::desc("Enable use of a base pointer for complex stack frames"))
static bool CantUseSP(const MachineFrameInfo &MFI)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallSet class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
static cl::opt< bool > EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true), cl::desc("Enable use of a base pointer for complex stack frames"))
static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II)
static cl::opt< bool > DisableRegAllocNDDHints("x86-disable-regalloc-hints-for-ndd", cl::Hidden, cl::init(false), cl::desc("Disable two address hints for register " "allocation"))
static ShapeT getTileShape(Register VirtReg, VirtRegMap *VRM, const MachineRegisterInfo *MRI)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:281
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:743
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
Emit instructions to copy a pair of physical registers.
void reportError(SMLoc L, const Twine &Msg)
Definition: MCContext.cpp:1068
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
static constexpr unsigned NoRegister
Definition: MCRegister.h:52
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Representation of each machine instruction.
Definition: MachineInstr.h:69
MachineOperand class - Representation of each machine instruction operand.
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
Represents a location in source code.
Definition: SMLoc.h:23
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
static StackOffset getFixed(int64_t Fixed)
Definition: TypeSize.h:42
const TargetRegisterClass *const * sc_iterator
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
sc_iterator getSuperClasses() const
Returns a NULL-terminated list of super-classes.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual bool canRealignStack(const MachineFunction &MF) const
True if the stack can be realigned for the target.
virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC, unsigned DefSubReg, const TargetRegisterClass *SrcRC, unsigned SrcSubReg) const
virtual bool shouldRealignStack(const MachineFunction &MF) const
True if storage within the function requires the stack pointer to be aligned more than the normal cal...
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physica...
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool hasShape(Register virtReg) const
Definition: VirtRegMap.h:116
ShapeT getShape(Register virtReg) const
Definition: VirtRegMap.h:120
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
Definition: VirtRegMap.h:105
void assignVirt2Shape(Register virtReg, ShapeT shape)
Definition: VirtRegMap.h:125
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register.
StackOffset getFrameIndexReferenceSP(const MachineFunction &MF, int FI, Register &SPReg, int Adjustment) const
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - This method should return the base register and offset used to reference a f...
bool Is64Bit
Is64Bit implies that x86_64 instructions are available.
bool Uses64BitFramePtr
True if the 64-bit frame or stack pointer should be used.
int getWin64EHFrameIndexRef(const MachineFunction &MF, int FI, Register &SPReg) const
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
MachineInstr * getStackPtrSaveMI() const
const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const override
getPointerRegClass - Returns a TargetRegisterClass used for pointer values.
unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const
bool hasBasePointer(const MachineFunction &MF) const
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
const TargetRegisterClass * getGPRsForTailCall(const MachineFunction &MF) const
getGPRsForTailCall - Returns a register class with registers that can be used in forming tail calls.
bool canRealignStack(const MachineFunction &MF) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
getReservedRegs - Returns a bitset indexed by physical register number indicating if a register is a ...
bool shouldRealignStack(const MachineFunction &MF) const override
unsigned getNumSupportedRegs(const MachineFunction &MF) const override
Return the number of registers for the function.
Register getFrameRegister(const MachineFunction &MF) const override
unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI) const
findDeadCallerSavedReg - Return a caller-saved register that isn't live when it reaches the "return" ...
const uint32_t * getDarwinTLSCallPreservedMask() const
bool isTileRegisterClass(const TargetRegisterClass *RC) const
Return true if it is tile register class.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
bool isArgumentRegister(const MachineFunction &MF, MCRegister Reg) const override
isArgumentReg - Returns true if Reg can be used as an argument to a function.
Register getStackRegister() const
const TargetRegisterClass * getLargestLegalSuperClass(const TargetRegisterClass *RC, const MachineFunction &MF) const override
const TargetRegisterClass * getMatchingSuperRegClass(const TargetRegisterClass *A, const TargetRegisterClass *B, unsigned Idx) const override
getMatchingSuperRegClass - Return a subclass of the specified register class A so that each register ...
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
unsigned getPtrSizedStackRegister(const MachineFunction &MF) const
int getSEHRegNum(unsigned i) const
bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC, unsigned DefSubReg, const TargetRegisterClass *SrcRC, unsigned SrcSubReg) const override
const TargetRegisterClass * getCrossCopyRegClass(const TargetRegisterClass *RC) const override
getCrossCopyRegClass - Returns a legal register class to copy a register in the specified class to or...
X86RegisterInfo(const Triple &TT)
Register getBaseRegister() const
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
void eliminateFrameIndex(MachineBasicBlock::iterator II, unsigned FIOperandNum, Register BaseReg, int FIOffset) const
const uint32_t * getNoPreservedMask() const override
bool isFixedRegister(const MachineFunction &MF, MCRegister PhysReg) const override
Returns true if PhysReg is a fixed register.
const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const override
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
getCalleeSavedRegs - Return a null-terminated list of all of the callee-save registers on this target...
void adjustStackMapLiveOutMask(uint32_t *Mask) const override
bool hasSSE1() const
Definition: X86Subtarget.h:193
const X86TargetLowering * getTargetLowering() const override
Definition: X86Subtarget.h:118
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
Definition: X86Subtarget.h:173
bool isTarget64BitLP64() const
Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
Definition: X86Subtarget.h:178
bool hasAVX512() const
Definition: X86Subtarget.h:201
bool hasAVX() const
Definition: X86Subtarget.h:199
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ X86_64_SysV
The C convention as specified in the x86-64 supplement to the System V ABI, used on most non-Windows ...
Definition: CallingConv.h:151
@ HiPE
Used by the High-Performance Erlang Compiler (HiPE).
Definition: CallingConv.h:53
@ CFGuard_Check
Special calling convention on Windows for calling the Control Guard Check ICall function.
Definition: CallingConv.h:82
@ PreserveMost
Used for runtime calls that preserves most registers.
Definition: CallingConv.h:63
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition: CallingConv.h:60
@ CXX_FAST_TLS
Used for access functions.
Definition: CallingConv.h:72
@ X86_INTR
x86 hardware interrupt context.
Definition: CallingConv.h:173
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition: CallingConv.h:50
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition: CallingConv.h:47
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
Definition: CallingConv.h:66
@ Intel_OCL_BI
Used for Intel OpenCL built-ins.
Definition: CallingConv.h:147
@ PreserveNone
Used for runtime calls that preserves none general registers.
Definition: CallingConv.h:90
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
Definition: CallingConv.h:159
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
Definition: CallingConv.h:87
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
Definition: CallingConv.h:255
@ X86_RegCall
Register calling convention used for parameters transfer optimization.
Definition: CallingConv.h:203
void initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI)
unsigned getNonNDVariant(unsigned Opc)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
FormattedNumber format_hex(uint64_t N, unsigned Width, bool Upper=false)
format_hex - Output N as a fixed width hexadecimal.
Definition: Format.h:187
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1886