//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TileShapeInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));
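
// NDD ("new data destination") instructions from Intel APX take a separate
// destination operand in addition to their sources. When the register
// allocator assigns the destination to the same physical register as a
// source, the instruction can later be compressed back to the shorter
// legacy two-address encoding; the hints added in getRegAllocationHints()
// below steer the allocator toward that, and this flag turns them off.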
static cl::opt<bool>
    DisableRegAllocNDDHints("x86-disable-regalloc-hints-for-ndd", cl::Hidden,
                            cl::init(false),
                            cl::desc("Disable two address hints for register "
                                     "allocation"));

X86RegisterInfo::X86RegisterInfo(const Triple &TT)
    : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
                         X86_MC::getDwarfRegFlavour(TT, false),
                         X86_MC::getDwarfRegFlavour(TT, true),
                         (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
  X86_MC::initLLVMToSEHAndCVRegMapping(this);

  // Cache some information.
  Is64Bit = TT.isArch64Bit();
  IsWin64 = Is64Bit && TT.isOSWindows();

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT pointer to be in EBX before function calls via the PLT,
  // so EBX cannot be the base pointer there.
  if (Is64Bit) {
    SlotSize = 8;
    // This matches the simplified 32-bit pointer code in the data layout
    // computation.
    // FIXME: Should use the data layout?
    bool Use64BitReg = !TT.isX32();
    StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
    FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
    BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
    BasePtr = X86::ESI;
  }
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}
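
// A note on the GR8_NOREX handling below: the high-byte registers AH, BH,
// CH and DH cannot be encoded in any instruction carrying a REX prefix, so
// a copy such as "mov %ah, %r8b" has no valid encoding in 64-bit mode.
// Keeping GR8_NOREX from inflating to GR8 avoids ever asking for one.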
const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();

  const TargetRegisterClass *Super = RC;
  TargetRegisterInfo::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
      // If AVX-512 isn't supported we should only inflate to these classes.
      if (!Subtarget.hasAVX512() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // If VLX isn't supported we should only inflate to these classes.
      if (!Subtarget.hasVLX() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::VR128XRegClassID:
    case X86::VR256XRegClassID:
      // If VLX isn't supported we shouldn't inflate to these classes.
      if (Subtarget.hasVLX() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::FR32XRegClassID:
    case X86::FR64XRegClassID:
      // If AVX-512 isn't supported we shouldn't inflate to these classes.
      if (Subtarget.hasAVX512() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::GR8_NOREX2RegClassID:
    case X86::GR16_NOREX2RegClassID:
    case X86::GR32_NOREX2RegClassID:
    case X86::GR64_NOREX2RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR512_0_15RegClassID:
    case X86::VR512RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    // If the target is 64-bit but we have been told to use 32-bit addresses,
    // we can still use a 64-bit register as long as we know the high bits
    // are zeros.
    // Reflect that in the returned register class.
    if (Is64Bit) {
      // When the target also allows a 64-bit frame pointer and we do have a
      // frame, it is fine to use it for the address accesses as well.
      const X86FrameLowering *TFI = getFrameLowering(MF);
      return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
                 ? &X86::LOW32_ADDR_ACCESS_RBPRegClass
                 : &X86::LOW32_ADDR_ACCESSRegClass;
    }
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    // NOSP does not contain RIP, so no special case here.
    return &X86::GR32_NOSPRegClass;
  case 2: // NOREX GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREXRegClass;
    return &X86::GR32_NOREXRegClass;
  case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREX_NOSPRegClass;
    // NOSP does not contain RIP, so no special case here.
    return &X86::GR32_NOREX_NOSPRegClass;
  case 4: // Available for tailcall (not callee-saved GPRs).
    return getGPRsForTailCall(MF);
  }
}
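
// Roughly, the case rejected below is a copy such as
//   %dst:gr64 = COPY %src.sub_32bit:gr64
// where folding away the sub-register would turn a 32-bit copy into a
// 64-bit one that also reads the undefined upper half of %src.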
bool X86RegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                           unsigned DefSubReg,
                                           const TargetRegisterClass *SrcRC,
                                           unsigned SrcSubReg) const {
  // Prevent rewriting a copy where the destination size is larger than the
  // input size. See PR41619.
  // FIXME: Should this be factored into the base implementation somehow?
  if (DefRC->hasSuperClassEq(&X86::GR64RegClass) && DefSubReg == 0 &&
      SrcRC->hasSuperClassEq(&X86::GR64RegClass) && SrcSubReg == X86::sub_32bit)
    return false;

  return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
                                                  SrcRC, SrcSubReg);
}

const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
    return &X86::GR64_TCW64RegClass;
  else if (Is64Bit)
    return &X86::GR64_TCRegClass;

  bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
  if (hasHipeCC)
    return &X86::GR32RegClass;
  return &X86::GR32_TCRegClass;
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Is64Bit ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}
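
// For reference: the baseline 64-bit SysV callee-saved set is RBX, RBP and
// R12-R15, while Win64 additionally preserves RSI, RDI and XMM6-XMM15. The
// special conventions handled below mostly grow or shrink that set.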

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "MachineFunction required");

  const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
  const Function &F = MF->getFunction();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool CallsEHReturn = MF->callsEHReturn();

  CallingConv::ID CC = F.getCallingConv();

  // If the attribute NoCallerSavedRegisters exists, then we set the X86_INTR
  // calling convention because it has the CSR list.
  if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
    CC = CallingConv::X86_INTR;

  // If the attribute is specified, override the CSRs normally specified by
  // the calling convention and use the empty set instead.
  if (MF->getFunction().hasFnAttribute("no_callee_saved_registers"))
    return CSR_NoRegs_SaveList;

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return IsWin64 ? CSR_Win64_RT_MostRegs_SaveList
                   : CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::PreserveNone:
    return CSR_64_NoneRegs_SaveList;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR() ?
             CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::X86_RegCall:
    if (Is64Bit) {
      if (IsWin64) {
        return (HasSSE ? CSR_Win64_RegCall_SaveList :
                         CSR_Win64_RegCall_NoSSE_SaveList);
      } else {
        return (HasSSE ? CSR_SysV64_RegCall_SaveList :
                         CSR_SysV64_RegCall_NoSSE_SaveList);
      }
    } else {
      return (HasSSE ? CSR_32_RegCall_SaveList :
                       CSR_32_RegCall_NoSSE_SaveList);
    }
  case CallingConv::CFGuard_Check:
    assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
    return (HasSSE ? CSR_Win32_CFGuard_Check_SaveList
                   : CSR_Win32_CFGuard_Check_NoSSE_SaveList);
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  case CallingConv::Win64:
    if (!HasSSE)
      return CSR_Win64_NoSSE_SaveList;
    return CSR_Win64_SaveList;
  case CallingConv::SwiftTail:
    if (!Is64Bit)
      return CSR_32_SaveList;
    return IsWin64 ? CSR_Win64_SwiftTail_SaveList : CSR_64_SwiftTail_SaveList;
  case CallingConv::X86_64_SysV:
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX512)
        return CSR_64_AllRegs_AVX512_SaveList;
      if (HasAVX)
        return CSR_64_AllRegs_AVX_SaveList;
      if (HasSSE)
        return CSR_64_AllRegs_SaveList;
      return CSR_64_AllRegs_NoSSE_SaveList;
    } else {
      if (HasAVX512)
        return CSR_32_AllRegs_AVX512_SaveList;
      if (HasAVX)
        return CSR_32_AllRegs_AVX_SaveList;
      if (HasSSE)
        return CSR_32_AllRegs_SSE_SaveList;
      return CSR_32_AllRegs_SaveList;
    }
  default:
    break;
  }

  if (Is64Bit) {
    bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
                     F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
    if (IsSwiftCC)
      return IsWin64 ? CSR_Win64_SwiftError_SaveList
                     : CSR_64_SwiftError_SaveList;

    if (IsWin64)
      return HasSSE ? CSR_Win64_SaveList : CSR_Win64_NoSSE_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }

  return CallsEHReturn ? CSR_32EHRet_SaveList : CSR_32_SaveList;
}
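
// The CSR_* save lists and register masks used above are emitted by
// TableGen from the calling-convention definitions in X86CallingConv.td.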

const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
    return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return IsWin64 ? CSR_Win64_RT_MostRegs_RegMask : CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::PreserveNone:
    return CSR_64_NoneRegs_RegMask;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return CSR_64_TLS_Darwin_RegMask;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::X86_RegCall:
    if (Is64Bit) {
      if (IsWin64) {
        return (HasSSE ? CSR_Win64_RegCall_RegMask :
                         CSR_Win64_RegCall_NoSSE_RegMask);
      } else {
        return (HasSSE ? CSR_SysV64_RegCall_RegMask :
                         CSR_SysV64_RegCall_NoSSE_RegMask);
      }
    } else {
      return (HasSSE ? CSR_32_RegCall_RegMask :
                       CSR_32_RegCall_NoSSE_RegMask);
    }
  case CallingConv::CFGuard_Check:
    assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
    return (HasSSE ? CSR_Win32_CFGuard_Check_RegMask
                   : CSR_Win32_CFGuard_Check_NoSSE_RegMask);
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  case CallingConv::Win64:
    return CSR_Win64_RegMask;
  case CallingConv::SwiftTail:
    if (!Is64Bit)
      return CSR_32_RegMask;
    return IsWin64 ? CSR_Win64_SwiftTail_RegMask : CSR_64_SwiftTail_RegMask;
  case CallingConv::X86_64_SysV:
    return CSR_64_RegMask;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX512)
        return CSR_64_AllRegs_AVX512_RegMask;
      if (HasAVX)
        return CSR_64_AllRegs_AVX_RegMask;
      if (HasSSE)
        return CSR_64_AllRegs_RegMask;
      return CSR_64_AllRegs_NoSSE_RegMask;
    } else {
      if (HasAVX512)
        return CSR_32_AllRegs_AVX512_RegMask;
      if (HasAVX)
        return CSR_32_AllRegs_AVX_RegMask;
      if (HasSSE)
        return CSR_32_AllRegs_SSE_RegMask;
      return CSR_32_AllRegs_RegMask;
    }
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    const Function &F = MF.getFunction();
    bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
                     F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
    if (IsSwiftCC)
      return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;

    return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
  }

  return CSR_32_RegMask;
}
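
// A register mask is a bit vector indexed by physical register number; a set
// bit means the register is preserved across the call (see
// MachineOperand::clobbersPhysReg()).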

const uint32_t *
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *X86RegisterInfo::getDarwinTLSCallPreservedMask() const {
  return CSR_64_TLS_Darwin_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const X86FrameLowering *TFI = getFrameLowering(MF);

  // Set the floating point control register as reserved.
  Reserved.set(X86::FPCW);

  // Set the floating point status register as reserved.
  Reserved.set(X86::FPSW);

  // Set the SIMD floating point control register as reserved.
  Reserved.set(X86::MXCSR);

  // Set the stack-pointer register and its aliases as reserved.
  for (const MCPhysReg &SubReg : subregs_inclusive(X86::RSP))
    Reserved.set(SubReg);

  // Set the Shadow Stack Pointer as reserved.
  Reserved.set(X86::SSP);

  // Set the instruction pointer register and its aliases as reserved.
  for (const MCPhysReg &SubReg : subregs_inclusive(X86::RIP))
    Reserved.set(SubReg);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (const MCPhysReg &SubReg : subregs_inclusive(X86::RBP))
      Reserved.set(SubReg);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction().getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(MF, CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
          "Stack realignment in presence of dynamic allocas is not supported "
          "with this calling convention.");

    Register BasePtr = getX86SubSuperRegister(getBaseRegister(), 64);
    for (const MCPhysReg &SubReg : subregs_inclusive(BasePtr))
      Reserved.set(SubReg);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);
    Reserved.set(X86::SIH);
    Reserved.set(X86::DIH);
    Reserved.set(X86::BPH);
    Reserved.set(X86::SPH);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 0; n != 16; ++n) {
      for (MCRegAliasIterator AI(X86::XMM16 + n, this, true); AI.isValid();
           ++AI)
        Reserved.set(*AI);
    }
  }

  // Reserve the extended general purpose registers.
  if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasEGPR())
    Reserved.set(X86::R16, X86::R31WH + 1);

  // The GRAAL calling convention reserves two additional registers.
  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    for (MCRegAliasIterator AI(X86::R14, this, true); AI.isValid(); ++AI)
      Reserved.set(*AI);
    for (MCRegAliasIterator AI(X86::R15, this, true); AI.isValid(); ++AI)
      Reserved.set(*AI);
  }

  assert(checkAllSuperRegsMarked(Reserved,
                                 {X86::SIL, X86::DIL, X86::BPL, X86::SPL,
                                  X86::SIH, X86::DIH, X86::BPH, X86::SPH}));
  return Reserved;
}

unsigned X86RegisterInfo::getNumSupportedRegs(const MachineFunction &MF) const {
  // All existing Intel CPUs that support AMX support AVX512 and all existing
  // Intel CPUs that support APX support AMX. AVX512 implies AVX.
  //
  // We enumerate the registers in X86GenRegisterInfo.inc in this order:
  //
  //   Registers before AVX512,
  //   AVX512 registers (X/YMM16-31, ZMM0-31, K registers)
  //   AMX registers (TMM)
  //   APX registers (R16-R31)
  //
  // and try to return the minimum number of registers supported by the target.
  static_assert((X86::R15WH + 1 == X86::YMM0) && (X86::YMM15 + 1 == X86::K0) &&
                    (X86::K6_K7 + 1 == X86::TMMCFG) &&
                    (X86::TMM7 + 1 == X86::R16) &&
                    (X86::R31WH + 1 == X86::NUM_TARGET_REGS),
                "Register number may be incorrect");

  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  if (ST.hasEGPR())
    return X86::NUM_TARGET_REGS;
  if (ST.hasAMXTILE())
    return X86::TMM7 + 1;
  if (ST.hasAVX512())
    return X86::K6_K7 + 1;
  if (ST.hasAVX())
    return X86::YMM15 + 1;
  return X86::R15WH + 1;
}

bool X86RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                         MCRegister Reg) const {
  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  const TargetRegisterInfo &TRI = *ST.getRegisterInfo();
  auto IsSubReg = [&](MCRegister RegA, MCRegister RegB) {
    return TRI.isSuperOrSubRegisterEq(RegA, RegB);
  };

  if (!ST.is64Bit())
    return llvm::any_of(
               SmallVector<MCRegister>{X86::EAX, X86::ECX, X86::EDX},
               [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }) ||
           (ST.hasMMX() && X86::VR64RegClass.contains(Reg));

  CallingConv::ID CC = MF.getFunction().getCallingConv();

  if (CC == CallingConv::X86_64_SysV && IsSubReg(X86::RAX, Reg))
    return true;

  if (llvm::any_of(
          SmallVector<MCRegister>{X86::RDX, X86::RCX, X86::R8, X86::R9},
          [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
    return true;

  if (CC != CallingConv::Win64 &&
      llvm::any_of(SmallVector<MCRegister>{X86::RDI, X86::RSI},
                   [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
    return true;

  if (ST.hasSSE1() &&
      llvm::any_of(SmallVector<MCRegister>{X86::XMM0, X86::XMM1, X86::XMM2,
                                           X86::XMM3, X86::XMM4, X86::XMM5,
                                           X86::XMM6, X86::XMM7},
                   [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
    return true;

  return X86GenRegisterInfo::isArgumentRegister(MF, Reg);
}

bool X86RegisterInfo::isFixedRegister(const MachineFunction &MF,
                                      MCRegister PhysReg) const {
  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  const TargetRegisterInfo &TRI = *ST.getRegisterInfo();

  // Stack pointer.
  if (TRI.isSuperOrSubRegisterEq(X86::RSP, PhysReg))
    return true;

  // Don't use the frame pointer if it's being used.
  const X86FrameLowering &TFI = *getFrameLowering(MF);
  if (TFI.hasFP(MF) && TRI.isSuperOrSubRegisterEq(X86::RBP, PhysReg))
    return true;

  return X86GenRegisterInfo::isFixedRegister(MF, PhysReg);
}

bool X86RegisterInfo::isTileRegisterClass(const TargetRegisterClass *RC) const {
  return RC->getID() == X86::TILERegClassID;
}

void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
  // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
  // because the calling convention defines the EFLAGS register as NOT
  // preserved.
  //
  // Unfortunately, EFLAGS can show up as live-out after branch folding. Add
  // an assert to track this and clear the register afterwards to avoid
  // unnecessary crashes during release builds.
  assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
         "EFLAGS are not live-out from a patchpoint.");

  // Also clean other registers that don't need preserving (IP).
  for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
    Mask[Reg / 32] &= ~(1U << (Reg % 32));
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

static bool CantUseSP(const MachineFrameInfo &MFI) {
  return MFI.hasVarSizedObjects() || MFI.hasOpaqueSPAdjustment();
}
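
// A typical function needing a base pointer both over-aligns a local
// (forcing stack realignment, so the frame pointer cannot address locals)
// and uses a dynamic alloca or stack-adjusting inline asm (so stack-pointer
// offsets are not constant). With neither FP nor SP usable everywhere, a
// third register (RBX/EBX, or ESI in 32-bit mode) takes over; see the
// constructor above for how it is chosen.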

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  // We have a virtual register to reference the argument, so no base
  // pointer is needed.
  if (X86FI->getStackPtrSaveMI() != nullptr)
    return false;

  if (X86FI->hasPreallocatedCall())
    return true;

  const MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can't
  // use both the SP and the FP, we need a separate base pointer register.
  bool CantUseFP = hasStackRealignment(MF);
  return CantUseFP && CantUseSP(MFI);
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (CantUseSP(MFI))
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
  if (TargetRegisterInfo::shouldRealignStack(MF))
    return true;

  return !Is64Bit && MF.getFunction().getCallingConv() == CallingConv::X86_INTR;
}

// tryOptimizeLEAtoMOV - helper function that tries to replace a LEA instruction
// of the form 'lea (%esp), %ebx' --> 'mov %esp, %ebx'.
// TODO: In this case we should be really trying first to entirely eliminate
// this instruction which is a plain copy.
static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II) {
  MachineInstr &MI = *II;
  unsigned Opc = II->getOpcode();
  // Check if this is a LEA of the form 'lea (%esp), %ebx'.
  if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||
      MI.getOperand(2).getImm() != 1 ||
      MI.getOperand(3).getReg() != X86::NoRegister ||
      MI.getOperand(4).getImm() != 0 ||
      MI.getOperand(5).getReg() != X86::NoRegister)
    return false;
  Register BasePtr = MI.getOperand(1).getReg();
  // In X32 mode, ensure the base-pointer is a 32-bit operand, so the LEA will
  // be replaced with a 32-bit operand MOV which will zero extend the upper
  // 32-bits of the super register.
  if (Opc == X86::LEA64_32r)
    BasePtr = getX86SubSuperRegister(BasePtr, 32);
  Register NewDestReg = MI.getOperand(0).getReg();
  const X86InstrInfo *TII =
      MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
  TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
                   MI.getOperand(1).isKill());
  MI.eraseFromParent();
  return true;
}

static bool isFuncletReturnInstr(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case X86::CATCHRET:
  case X86::CLEANUPRET:
    return true;
  default:
    return false;
  }
  llvm_unreachable("impossible");
}
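
// Frame-index elimination runs after frame layout is final: each abstract
// frame-index operand is rewritten below into a concrete base register
// (SP, FP, or the base pointer) plus a byte offset.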

void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                          unsigned FIOperandNum,
                                          Register BaseReg,
                                          int FIOffset) const {
  MachineInstr &MI = *II;
  unsigned Opc = MI.getOpcode();
  if (Opc == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    FI.ChangeToImmediate(FIOffset);
    return;
  }

  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum + 3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    if (Offset != 0)
      MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset =
        FIOffset + (uint64_t)MI.getOperand(FIOperandNum + 3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

bool
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  bool IsEHFuncletEpilogue = MBBI == MBB.end() ? false
                                               : isFuncletReturnInstr(*MBBI);
  const X86FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

  // Determine base register and offset.
  int FIOffset;
  Register BasePtr;
  if (MI.isReturn()) {
    assert((!hasStackRealignment(MF) ||
            MF.getFrameInfo().isFixedObjectIndex(FrameIndex)) &&
           "Return instruction can only reference SP relative frame objects");
    FIOffset =
        TFI->getFrameIndexReferenceSP(MF, FrameIndex, BasePtr, 0).getFixed();
  } else if (TFI->Is64Bit && (MBB.isEHFuncletEntry() || IsEHFuncletEpilogue)) {
    FIOffset = TFI->getWin64EHFrameIndexRef(MF, FrameIndex, BasePtr);
  } else {
    FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed();
  }

  // LOCAL_ESCAPE uses a single offset, with no register. It only works in the
  // simple FP case, and doesn't work with stack realignment. On 32-bit, the
  // offset is from the traditional base pointer location. On 64-bit, the
  // offset is from the SP at the end of the prologue, not the FP location. This
  // matches the behavior of llvm.frameaddress.
  unsigned Opc = MI.getOpcode();
  if (Opc == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    FI.ChangeToImmediate(FIOffset);
    return false;
  }

  // For LEA64_32r when BasePtr is 32-bits (X32) we can use full-size 64-bit
  // register as source operand, semantic is the same and destination is
  // 32-bits. It saves one byte per lea in code since 0x67 prefix is avoided.
  // Don't change BasePtr since it is used later for stack adjustment.
  Register MachineBasePtr = BasePtr;
  if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
    MachineBasePtr = getX86SubSuperRegister(BasePtr, 64);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with base register. Add an offset to the offset.
  MI.getOperand(FIOperandNum).ChangeToRegister(MachineBasePtr, false);

  if (BasePtr == StackPtr)
    FIOffset += SPAdj;

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return false;
  }

  if (MI.getOperand(FIOperandNum + 3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
      MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
                      (uint64_t)MI.getOperand(FIOperandNum + 3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
  return false;
}
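
// findDeadCallerSavedReg - Return a caller-saved register that isn't live
// when it reaches the "return" instruction. Such a register can be clobbered
// at the return without being saved first.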
unsigned X86RegisterInfo::findDeadCallerSavedReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI) const {
  const MachineFunction *MF = MBB.getParent();
  if (MF->callsEHReturn())
    return 0;

  const TargetRegisterClass &AvailableRegs = *getGPRsForTailCall(*MF);

  if (MBBI == MBB.end())
    return 0;

  switch (MBBI->getOpcode()) {
  default:
    return 0;
  case TargetOpcode::PATCHABLE_RET:
  case X86::RET:
  case X86::RET32:
  case X86::RET64:
  case X86::RETI32:
  case X86::RETI64:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (MachineOperand &MO : MBBI->operands()) {
      if (!MO.isReg() || MO.isDef())
        continue;
      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, this, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    for (auto CS : AvailableRegs)
      if (!Uses.count(CS) && CS != X86::RIP && CS != X86::RSP && CS != X86::ESP)
        return CS;
  }
  }

  return 0;
}

Register X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  Register FrameReg = getFrameRegister(MF);
  if (Subtarget.isTarget64BitILP32())
    FrameReg = getX86SubSuperRegister(FrameReg, 32);
  return FrameReg;
}

unsigned
X86RegisterInfo::getPtrSizedStackRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  Register StackReg = getStackRegister();
  if (Subtarget.isTarget64BitILP32())
    StackReg = getX86SubSuperRegister(StackReg, 32);
  return StackReg;
}
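
// AMX tile registers carry an explicit shape (rows x columns) configured
// before use, so the allocator should only reuse a physical tile register
// for virtual registers of a matching shape. getTileShape() recovers the
// shape of a virtual tile register from its defining instruction.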
static ShapeT getTileShape(Register VirtReg, VirtRegMap *VRM,
                           const MachineRegisterInfo *MRI) {
  if (VRM->hasShape(VirtReg))
    return VRM->getShape(VirtReg);

  const MachineOperand &Def = *MRI->def_begin(VirtReg);
  MachineInstr *MI = const_cast<MachineInstr *>(Def.getParent());
  unsigned OpCode = MI->getOpcode();
  switch (OpCode) {
  default:
    llvm_unreachable("Unexpected machine instruction on tile register!");
    break;
  case X86::COPY: {
    Register SrcReg = MI->getOperand(1).getReg();
    ShapeT Shape = getTileShape(SrcReg, VRM, MRI);
    VRM->assignVirt2Shape(VirtReg, Shape);
    return Shape;
  }
  // We only collect the tile shape that is defined.
  case X86::PTILELOADDV:
  case X86::PTILELOADDT1V:
  case X86::PTDPBSSDV:
  case X86::PTDPBSUDV:
  case X86::PTDPBUSDV:
  case X86::PTDPBUUDV:
  case X86::PTILEZEROV:
  case X86::PTDPBF16PSV:
  case X86::PTDPFP16PSV:
  case X86::PTCMMIMFP16PSV:
  case X86::PTCMMRLFP16PSV:
    MachineOperand &MO1 = MI->getOperand(1);
    MachineOperand &MO2 = MI->getOperand(2);
    ShapeT Shape(&MO1, &MO2, MRI);
    VRM->assignVirt2Shape(VirtReg, Shape);
    return Shape;
  }
}

bool X86RegisterInfo::getRegAllocationHints(Register VirtReg,
                                            ArrayRef<MCPhysReg> Order,
                                            SmallVectorImpl<MCPhysReg> &Hints,
                                            const MachineFunction &MF,
                                            const VirtRegMap *VRM,
                                            const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);
  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  const TargetRegisterInfo &TRI = *ST.getRegisterInfo();

  unsigned ID = RC.getID();

  if (!VRM)
    return BaseImplRetVal;

  if (ID != X86::TILERegClassID) {
    if (DisableRegAllocNDDHints || !ST.hasNDD() ||
        !TRI.isGeneralPurposeRegisterClass(&RC))
      return BaseImplRetVal;

    // Add any two address hints after any copy hints.
    SmallSet<unsigned, 4> TwoAddrHints;

    auto TryAddNDDHint = [&](const MachineOperand &MO) {
      Register Reg = MO.getReg();
      Register PhysReg =
          Register::isPhysicalRegister(Reg) ? Reg : Register(VRM->getPhys(Reg));
      if (PhysReg && !MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    };

    // An NDD instruction is compressible when Op0 is allocated to the same
    // physical register as Op1 (or Op2 if the instruction is commutable).
    for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
      const MachineInstr &MI = *MO.getParent();
      if (!X86::getNonNDVariant(MI.getOpcode()))
        continue;
      unsigned OpIdx = MI.getOperandNo(&MO);
      if (OpIdx == 0) {
        assert(MI.getOperand(1).isReg());
        TryAddNDDHint(MI.getOperand(1));
        if (MI.isCommutable()) {
          assert(MI.getOperand(2).isReg());
          TryAddNDDHint(MI.getOperand(2));
        }
      } else if (OpIdx == 1) {
        TryAddNDDHint(MI.getOperand(0));
      } else if (MI.isCommutable() && OpIdx == 2) {
        TryAddNDDHint(MI.getOperand(0));
      }
    }

    for (MCPhysReg OrderReg : Order)
      if (TwoAddrHints.count(OrderReg))
        Hints.push_back(OrderReg);

    return BaseImplRetVal;
  }

  ShapeT VirtShape = getTileShape(VirtReg, const_cast<VirtRegMap *>(VRM), MRI);
  auto AddHint = [&](MCPhysReg PhysReg) {
    Register VReg = Matrix->getOneVReg(PhysReg);
    if (VReg == MCRegister::NoRegister) { // Not allocated yet
      Hints.push_back(PhysReg);
      return;
    }
    ShapeT PhysShape = getTileShape(VReg, const_cast<VirtRegMap *>(VRM), MRI);
    if (PhysShape == VirtShape)
      Hints.push_back(PhysReg);
  };

  SmallSet<MCPhysReg, 4> CopyHints;
  CopyHints.insert(Hints.begin(), Hints.end());
  Hints.clear();
  for (auto Hint : CopyHints) {
    if (RC.contains(Hint) && !MRI->isReserved(Hint))
      AddHint(Hint);
  }
  for (MCPhysReg PhysReg : Order) {
    if (!CopyHints.count(PhysReg) && RC.contains(PhysReg) &&
        !MRI->isReserved(PhysReg))
      AddHint(PhysReg);
  }

#define DEBUG_TYPE "tile-hint"
  LLVM_DEBUG({
    dbgs() << "Hints for virtual register " << format_hex(VirtReg, 8) << "\n";
    for (auto Hint : Hints) {
      dbgs() << "tmm" << Hint << ",";
    }
    dbgs() << "\n";
  });
#undef DEBUG_TYPE

  return true;
}