// Extracted from the doxygen source view of LLVM 23.0.0git,
// llvm/lib/CodeGen/MachineFunction.cpp. Note: the extraction dropped a
// number of physical source lines; several definitions below are incomplete.
1//===- MachineFunction.cpp ------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Collect native machine code information for a function. This allows
10// target-specific information about the generated code to be stored with each
11// function.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/BitVector.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
44#include "llvm/Config/llvm-config.h"
45#include "llvm/IR/Attributes.h"
46#include "llvm/IR/BasicBlock.h"
47#include "llvm/IR/Constant.h"
48#include "llvm/IR/DataLayout.h"
51#include "llvm/IR/Function.h"
52#include "llvm/IR/GlobalValue.h"
53#include "llvm/IR/Instruction.h"
55#include "llvm/IR/Metadata.h"
56#include "llvm/IR/Module.h"
58#include "llvm/IR/Value.h"
59#include "llvm/MC/MCContext.h"
60#include "llvm/MC/MCSymbol.h"
61#include "llvm/MC/SectionKind.h"
71#include <algorithm>
72#include <cassert>
73#include <cstddef>
74#include <cstdint>
75#include <iterator>
76#include <string>
77#include <utility>
78#include <vector>
79
81
82using namespace llvm;
83
84#define DEBUG_TYPE "codegen"
85
87 "align-all-functions",
88 cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
89 "means align on 16B boundaries)."),
91
94
95 // clang-format off
96 switch(Prop) {
97 case P::FailedISel: return "FailedISel";
98 case P::IsSSA: return "IsSSA";
99 case P::Legalized: return "Legalized";
100 case P::NoPHIs: return "NoPHIs";
101 case P::NoVRegs: return "NoVRegs";
102 case P::RegBankSelected: return "RegBankSelected";
103 case P::Selected: return "Selected";
104 case P::TracksLiveness: return "TracksLiveness";
105 case P::TiedOpsRewritten: return "TiedOpsRewritten";
106 case P::FailsVerification: return "FailsVerification";
107 case P::FailedRegAlloc: return "FailedRegAlloc";
108 case P::TracksDebugUserValues: return "TracksDebugUserValues";
109 }
110 // clang-format on
111 llvm_unreachable("Invalid machine function property");
112}
113
115 if (!F.hasFnAttribute(Attribute::SafeStack))
116 return;
117
118 auto *Existing =
119 dyn_cast_or_null<MDTuple>(F.getMetadata(LLVMContext::MD_annotation));
120
121 if (!Existing || Existing->getNumOperands() != 2)
122 return;
123
124 auto *MetadataName = "unsafe-stack-size";
125 if (auto &N = Existing->getOperand(0)) {
126 if (N.equalsStr(MetadataName)) {
127 if (auto &Op = Existing->getOperand(1)) {
128 auto Val = mdconst::extract<ConstantInt>(Op)->getZExtValue();
129 FrameInfo.setUnsafeStackSize(Val);
130 }
131 }
132 }
133}
134
// Out-of-line key function: defining anchor() here pins Delegate's vtable
// (and type info) to this translation unit so it is emitted exactly once.
void MachineFunction::Delegate::anchor() {}
137
139 const char *Separator = "";
140 for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
141 if (!Properties[I])
142 continue;
143 OS << Separator << getPropertyName(static_cast<Property>(I));
144 Separator = ", ";
145 }
146}
147
148//===----------------------------------------------------------------------===//
149// MachineFunction implementation
150//===----------------------------------------------------------------------===//
151
152// Out-of-line virtual method.
154
156 MBB->getParent()->deleteMachineBasicBlock(MBB);
157}
158
160 const Function &F) {
161 if (auto MA = F.getFnStackAlign())
162 return *MA;
163 return STI.getFrameLowering()->getStackAlign();
164}
165
167 Attribute FPAttr = F.getFnAttribute("frame-pointer");
168 if (!FPAttr.isValid())
170
171 StringRef FP = FPAttr.getValueAsString();
174 .Case("non-leaf", FramePointerKind::NonLeaf)
175 .Case("non-leaf-no-reserve", FramePointerKind::NonLeafNoReserve)
176 .Case("reserved", FramePointerKind::Reserved)
179}
180
182 const TargetSubtargetInfo &STI, MCContext &Ctx,
183 unsigned FunctionNum)
184 : F(F), Target(Target), STI(STI), Ctx(Ctx) {
185 FunctionNumber = FunctionNum;
186 init();
187}
188
189void MachineFunction::handleInsertion(MachineInstr &MI) {
190 if (TheDelegate)
191 TheDelegate->MF_HandleInsertion(MI);
192}
193
194void MachineFunction::handleRemoval(MachineInstr &MI) {
195 if (TheDelegate)
196 TheDelegate->MF_HandleRemoval(MI);
197}
198
200 const MCInstrDesc &TID) {
201 if (TheDelegate)
202 TheDelegate->MF_HandleChangeDesc(MI, TID);
203}
204
205void MachineFunction::init() {
206 // Assume the function starts in SSA form with correct liveness.
207 Properties.setIsSSA();
208 Properties.setTracksLiveness();
209 RegInfo = new (Allocator) MachineRegisterInfo(this);
210
211 MFInfo = nullptr;
212
213 // We can realign the stack if the target supports it and the user hasn't
214 // explicitly asked us not to.
215 bool CanRealignSP = STI.getFrameLowering()->isStackRealignable() &&
216 !F.hasFnAttribute("no-realign-stack");
217 bool ForceRealignSP = F.hasFnAttribute(Attribute::StackAlignment) ||
218 F.hasFnAttribute("stackrealign");
219 FrameInfo = new (Allocator) MachineFrameInfo(
220 getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
221 /*ForcedRealign=*/ForceRealignSP && CanRealignSP);
223
224 setUnsafeStackSize(F, *FrameInfo);
225
226 if (F.hasFnAttribute(Attribute::StackAlignment))
227 FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());
228
230 Alignment = STI.getTargetLowering()->getMinFunctionAlignment();
231
232 // -fsanitize=function and -fsanitize=kcfi instrument indirect function calls
233 // to load a type hash before the function label. Ensure functions are aligned
234 // by a least 4 to avoid unaligned access, which is especially important for
235 // -mno-unaligned-access.
236 if (F.hasMetadata(LLVMContext::MD_func_sanitize) ||
237 F.getMetadata(LLVMContext::MD_kcfi_type))
238 Alignment = std::max(Alignment, Align(4));
239
241 Alignment = Align(1ULL << AlignAllFunctions);
242
243 JumpTableInfo = nullptr;
244
246 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
247 WinEHInfo = new (Allocator) WinEHFuncInfo();
248 }
249
251 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
252 WasmEHInfo = new (Allocator) WasmEHFuncInfo();
253 }
254
255 if (!Target.isCompatibleDataLayout(getDataLayout())) {
257 formatv("Can't create a MachineFunction using a Module with a "
258 "Target-incompatible DataLayout attached\n Target "
259 "DataLayout: {0}\n Module DataLayout: {1}\n",
260 Target.createDataLayout().getStringRepresentation(),
261 getDataLayout().getStringRepresentation()));
262 }
263
264 PSVManager = std::make_unique<PseudoSourceValueManager>(getTarget());
265}
266
268 const TargetSubtargetInfo &STI) {
269 assert(!MFInfo && "MachineFunctionInfo already set");
270 MFInfo = Target.createMachineFunctionInfo(Allocator, F, &STI);
271}
272
276
/// Release all per-function state back to the bump allocator. Destruction
/// order matters here; see the inline comments.
void MachineFunction::clear() {
  Properties.reset();

  // Clear JumpTableInfo first. Otherwise, every MBB we delete would do a
  // linear search over the jump table entries to find and erase itself.
  if (JumpTableInfo) {
    JumpTableInfo->~MachineJumpTableInfo();
    Allocator.Deallocate(JumpTableInfo);
    JumpTableInfo = nullptr;
  }

  // Don't call destructors on MachineInstr and MachineOperand. All of their
  // memory comes from the BumpPtrAllocator which is about to be purged.
  //
  // Do call MachineBasicBlock destructors, it contains std::vectors.
  for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
    I->Insts.clearAndLeakNodesUnsafely();
  MBBNumbering.clear();

  // Return recycled objects to the allocator and drop side tables.
  InstructionRecycler.clear(Allocator);
  OperandRecycler.clear(Allocator);
  BasicBlockRecycler.clear(Allocator);
  CodeViewAnnotations.clear();
  // These are only destroyed when they were actually created (non-null).
  if (RegInfo) {
    RegInfo->~MachineRegisterInfo();
    Allocator.Deallocate(RegInfo);
  }
  if (MFInfo) {
    MFInfo->~MachineFunctionInfo();
    Allocator.Deallocate(MFInfo);
  }

  FrameInfo->~MachineFrameInfo();
  Allocator.Deallocate(FrameInfo);

  ConstantPool->~MachineConstantPool();
  Allocator.Deallocate(ConstantPool);

  // EH info objects exist only for functions whose personality required
  // them; see init().
  if (WinEHInfo) {
    WinEHInfo->~WinEHFuncInfo();
    Allocator.Deallocate(WinEHInfo);
  }

  if (WasmEHInfo) {
    WasmEHInfo->~WasmEHFuncInfo();
    Allocator.Deallocate(WasmEHInfo);
  }
}
326
328 return F.getDataLayout();
329}
330
331/// Get the JumpTableInfo for this function.
332/// If it does not already exist, allocate one.
334getOrCreateJumpTableInfo(unsigned EntryKind) {
335 if (JumpTableInfo) return JumpTableInfo;
336
337 JumpTableInfo = new (Allocator)
339 return JumpTableInfo;
340}
341
343 return F.getDenormalMode(FPType);
344}
345
346/// Should we be emitting segmented stack stuff for the function
348 return getFunction().hasFnAttribute("split-stack");
349}
350
352 Align PrefAlignment;
353
354 if (MaybeAlign A = F.getPreferredAlignment())
355 PrefAlignment = *A;
356 else if (!F.hasOptSize())
357 PrefAlignment = STI.getTargetLowering()->getPrefFunctionAlignment();
358 else
359 PrefAlignment = Align(1);
360
361 return std::max(PrefAlignment, getAlignment());
362}
363
364[[nodiscard]] unsigned
366 FrameInstructions.push_back(Inst);
367 return FrameInstructions.size() - 1;
368}
369
370/// This discards all of the MachineBasicBlock numbers and recomputes them.
371/// This guarantees that the MBB numbers are sequential, dense, and match the
372/// ordering of the blocks within the function. If a specific MachineBasicBlock
373/// is specified, only that block and those after it are renumbered.
375 if (empty()) { MBBNumbering.clear(); return; }
377 if (MBB == nullptr)
378 MBBI = begin();
379 else
380 MBBI = MBB->getIterator();
381
382 // Figure out the block number this should have.
383 unsigned BlockNo = 0;
384 if (MBBI != begin())
385 BlockNo = std::prev(MBBI)->getNumber() + 1;
386
387 for (; MBBI != E; ++MBBI, ++BlockNo) {
388 if (MBBI->getNumber() != (int)BlockNo) {
389 // Remove use of the old number.
390 if (MBBI->getNumber() != -1) {
391 assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
392 "MBB number mismatch!");
393 MBBNumbering[MBBI->getNumber()] = nullptr;
394 }
395
396 // If BlockNo is already taken, set that block's number to -1.
397 if (MBBNumbering[BlockNo])
398 MBBNumbering[BlockNo]->setNumber(-1);
399
400 MBBNumbering[BlockNo] = &*MBBI;
401 MBBI->setNumber(BlockNo);
402 }
403 }
404
405 // Okay, all the blocks are renumbered. If we have compactified the block
406 // numbering, shrink MBBNumbering now.
407 assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
408 MBBNumbering.resize(BlockNo);
409}
410
413 const Align FunctionAlignment = getAlignment();
415 /// Offset - Distance from the beginning of the function to the end
416 /// of the basic block.
417 int64_t Offset = 0;
418
419 for (; MBBI != E; ++MBBI) {
420 const Align Alignment = MBBI->getAlignment();
421 int64_t BlockSize = 0;
422
423 for (auto &MI : *MBBI) {
424 BlockSize += TII.getInstSizeInBytes(MI);
425 }
426
427 int64_t OffsetBB;
428 if (Alignment <= FunctionAlignment) {
429 OffsetBB = alignTo(Offset, Alignment);
430 } else {
431 // The alignment of this MBB is larger than the function's alignment, so
432 // we can't tell whether or not it will insert nops. Assume that it will.
433 OffsetBB = alignTo(Offset, Alignment) + Alignment.value() -
434 FunctionAlignment.value();
435 }
436 Offset = OffsetBB + BlockSize;
437 }
438
439 return Offset;
440}
441
442/// This method iterates over the basic blocks and assigns their IsBeginSection
443/// and IsEndSection fields. This must be called after MBB layout is finalized
444/// and the SectionID's are assigned to MBBs.
447 auto CurrentSectionID = front().getSectionID();
448 for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
449 if (MBBI->getSectionID() == CurrentSectionID)
450 continue;
451 MBBI->setIsBeginSection();
452 std::prev(MBBI)->setIsEndSection();
453 CurrentSectionID = MBBI->getSectionID();
454 }
456}
457
458/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
459MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
460 DebugLoc DL,
461 bool NoImplicit) {
462 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
463 MachineInstr(*this, MCID, std::move(DL), NoImplicit);
464}
465
466/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
467/// identical in all ways except the instruction has no parent, prev, or next.
469MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
470 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
471 MachineInstr(*this, *Orig);
472}
473
474MachineInstr &MachineFunction::cloneMachineInstrBundle(
476 const MachineInstr &Orig) {
477 MachineInstr *FirstClone = nullptr;
479 while (true) {
480 MachineInstr *Cloned = CloneMachineInstr(&*I);
481 MBB.insert(InsertBefore, Cloned);
482 if (FirstClone == nullptr) {
483 FirstClone = Cloned;
484 } else {
485 Cloned->bundleWithPred();
486 }
487
488 if (!I->isBundledWithSucc())
489 break;
490 ++I;
491 }
492 // Copy over call info to the cloned instruction if needed. If Orig is in
493 // a bundle, copyAdditionalCallInfo takes care of finding the call instruction
494 // in the bundle.
496 copyAdditionalCallInfo(&Orig, FirstClone);
497 return *FirstClone;
498}
499
/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void MachineFunction::deleteMachineInstr(MachineInstr *MI) {
  // Verify that a call site info is at valid state. This assertion should
  // be triggered during the implementation of support for the
  // call site info of a new architecture. If the assertion is triggered,
  // back trace will tell where to insert a call to updateCallSiteInfo().
  assert((!MI->isCandidateForAdditionalCallInfo() ||
          !CallSitesInfo.contains(MI)) &&
         "Call site info was not updated!");
  // Verify that the "called globals" info is in a valid state.
  assert((!MI->isCandidateForAdditionalCallInfo() ||
          !CalledGlobalsInfo.contains(MI)) &&
         "Called globals info was not updated!");
  // Strip it for parts. The operand array and the MI object itself are
  // independently recyclable.
  if (MI->Operands)
    deallocateOperandArray(MI->CapOperands, MI->Operands);
  // Don't call ~MachineInstr() which must be trivial anyway because
  // ~MachineFunction drops whole lists of MachineInstrs without calling their
  // destructors.
  InstructionRecycler.Deallocate(Allocator, MI);
}
525
526/// Allocate a new MachineBasicBlock. Use this instead of
527/// `new MachineBasicBlock'.
530 std::optional<UniqueBBID> BBID) {
532 new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
533 MachineBasicBlock(*this, BB);
534 // Set BBID for `-basic-block-sections=list` and `-basic-block-address-map` to
535 // allow robust mapping of profiles to basic blocks.
536 if (Target.Options.BBAddrMap ||
537 Target.getBBSectionsType() == BasicBlockSection::List)
538 MBB->setBBID(BBID.has_value() ? *BBID : UniqueBBID{NextBBID++, 0});
539 return MBB;
540}
541
542/// Delete the given MachineBasicBlock.
544 assert(MBB->getParent() == this && "MBB parent mismatch!");
545 // Clean up any references to MBB in jump tables before deleting it.
546 if (JumpTableInfo)
547 JumpTableInfo->RemoveMBBFromJumpTables(MBB);
548 MBB->~MachineBasicBlock();
549 BasicBlockRecycler.Deallocate(Allocator, MBB);
550}
551
554 Align BaseAlignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
555 SyncScope::ID SSID, AtomicOrdering Ordering,
556 AtomicOrdering FailureOrdering) {
557 assert((!Size.hasValue() ||
558 Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
559 "Unexpected an unknown size to be represented using "
560 "LocationSize::beforeOrAfter()");
561 return new (Allocator)
562 MachineMemOperand(PtrInfo, F, Size, BaseAlignment, AAInfo, Ranges, SSID,
563 Ordering, FailureOrdering);
564}
565
568 Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
569 SyncScope::ID SSID, AtomicOrdering Ordering,
570 AtomicOrdering FailureOrdering) {
571 return new (Allocator)
572 MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID,
573 Ordering, FailureOrdering);
574}
575
578 const MachinePointerInfo &PtrInfo,
580 assert((!Size.hasValue() ||
581 Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
582 "Unexpected an unknown size to be represented using "
583 "LocationSize::beforeOrAfter()");
584 return new (Allocator)
585 MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
586 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
588}
589
591 const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) {
592 return new (Allocator)
593 MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(),
594 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
596}
597
600 int64_t Offset, LLT Ty) {
601 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
602
603 // If there is no pointer value, the offset isn't tracked so we need to adjust
604 // the base alignment.
605 Align Alignment = PtrInfo.V.isNull()
607 : MMO->getBaseAlign();
608
609 // Do not preserve ranges, since we don't necessarily know what the high bits
610 // are anymore.
611 return new (Allocator) MachineMemOperand(
612 PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment,
613 MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
615}
616
619 const AAMDNodes &AAInfo) {
620 MachinePointerInfo MPI = MMO->getValue() ?
621 MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
623
624 return new (Allocator) MachineMemOperand(
625 MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
626 MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
627 MMO->getFailureOrdering());
628}
629
633 return new (Allocator) MachineMemOperand(
634 MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
635 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
637}
638
639MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
640 ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
641 MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections,
642 uint32_t CFIType, MDNode *MMRAs, Value *DS) {
643 return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
644 PostInstrSymbol, HeapAllocMarker,
645 PCSections, CFIType, MMRAs, DS);
646}
647
649 char *Dest = Allocator.Allocate<char>(Name.size() + 1);
650 llvm::copy(Name, Dest);
651 Dest[Name.size()] = 0;
652 return Dest;
653}
654
656 unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
657 unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
658 uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
659 memset(Mask, 0, Size * sizeof(Mask[0]));
660 return Mask;
661}
662
664 int* AllocMask = Allocator.Allocate<int>(Mask.size());
665 copy(Mask, AllocMask);
666 return {AllocMask, Mask.size()};
667}
668
669#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
673#endif
674
678
679void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
680 OS << "# Machine code for function " << getName() << ": ";
681 getProperties().print(OS);
682 OS << '\n';
683
684 // Print Frame Information
685 FrameInfo->print(*this, OS);
686
687 // Print JumpTable Information
688 if (JumpTableInfo)
689 JumpTableInfo->print(OS);
690
691 // Print Constant Pool
692 ConstantPool->print(OS);
693
695
696 if (RegInfo && !RegInfo->livein_empty()) {
697 OS << "Function Live Ins: ";
699 I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
700 OS << printReg(I->first, TRI);
701 if (I->second)
702 OS << " in " << printReg(I->second, TRI);
703 if (std::next(I) != E)
704 OS << ", ";
705 }
706 OS << '\n';
707 }
708
711 for (const auto &BB : *this) {
712 OS << '\n';
713 // If we print the whole function, print it at its most verbose level.
714 BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
715 }
716
717 OS << "\n# End machine code for function " << getName() << ".\n\n";
718}
719
720/// True if this function needs frame moves for debug or exceptions.
722 // TODO: Ideally, what we'd like is to have a switch that allows emitting
723 // synchronous (precise at call-sites only) CFA into .eh_frame. However, even
724 // under this switch, we'd like .debug_frame to be precise when using -g. At
725 // this moment, there's no way to specify that some CFI directives go into
726 // .eh_frame only, while others go into .debug_frame only.
728 F.needsUnwindTableEntry() ||
729 !F.getParent()->debug_compile_units().empty();
730}
731
733 if (MDNode *Node = CB.getMetadata(llvm::LLVMContext::MD_call_target))
735
736 // Numeric callee_type ids are only for indirect calls.
737 if (!CB.isIndirectCall())
738 return;
739
740 MDNode *CalleeTypeList = CB.getMetadata(LLVMContext::MD_callee_type);
741 if (!CalleeTypeList)
742 return;
743
744 for (const MDOperand &Op : CalleeTypeList->operands()) {
745 MDNode *TypeMD = cast<MDNode>(Op);
746 MDString *TypeIdStr = cast<MDString>(TypeMD->getOperand(1));
747 // Compute numeric type id from generalized type id string
748 uint64_t TypeIdVal = MD5Hash(TypeIdStr->getString());
749 IntegerType *Int64Ty = Type::getInt64Ty(CB.getContext());
750 CalleeTypeIds.push_back(
751 ConstantInt::get(Int64Ty, TypeIdVal, /*IsSigned=*/false));
752 }
753}
754
755template <>
757 : public DefaultDOTGraphTraits {
759
760 static std::string getGraphName(const MachineFunction *F) {
761 return ("CFG for '" + F->getName() + "' function").str();
762 }
763
765 const MachineFunction *Graph) {
766 std::string OutStr;
767 {
768 raw_string_ostream OSS(OutStr);
769
770 if (isSimple()) {
771 OSS << printMBBReference(*Node);
772 if (const BasicBlock *BB = Node->getBasicBlock())
773 OSS << ": " << BB->getName();
774 } else
775 Node->print(OSS);
776 }
777
778 if (OutStr[0] == '\n')
779 OutStr.erase(OutStr.begin());
780
781 // Process string output to make it nicer...
782 for (unsigned i = 0; i != OutStr.length(); ++i)
783 if (OutStr[i] == '\n') { // Left justify
784 OutStr[i] = '\\';
785 OutStr.insert(OutStr.begin() + i + 1, 'l');
786 }
787 return OutStr;
788 }
789};
790
792{
793#ifndef NDEBUG
794 ViewGraph(this, "mf" + getName());
795#else
796 errs() << "MachineFunction::viewCFG is only available in debug builds on "
797 << "systems with Graphviz or gv!\n";
798#endif // NDEBUG
799}
800
802{
803#ifndef NDEBUG
804 ViewGraph(this, "mf" + getName(), true);
805#else
806 errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
807 << "systems with Graphviz or gv!\n";
808#endif // NDEBUG
809}
810
811/// Add the specified physical register as a live-in value and
812/// create a corresponding virtual register for it.
814 const TargetRegisterClass *RC) {
816 Register VReg = MRI.getLiveInVirtReg(PReg);
817 if (VReg) {
818 const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
819 (void)VRegRC;
820 // A physical register can be added several times.
821 // Between two calls, the register class of the related virtual register
822 // may have been constrained to match some operation constraints.
823 // In that case, check that the current register class includes the
824 // physical register and is a sub class of the specified RC.
825 assert((VRegRC == RC || (VRegRC->contains(PReg) &&
826 RC->hasSubClassEq(VRegRC))) &&
827 "Register class mismatch!");
828 return VReg;
829 }
830 VReg = MRI.createVirtualRegister(RC);
831 MRI.addLiveIn(PReg, VReg);
832 return VReg;
833}
834
835/// Return the MCSymbol for the specified non-empty jump table.
836/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
837/// normal 'L' label is returned.
839 bool isLinkerPrivate) const {
840 const DataLayout &DL = getDataLayout();
841 assert(JumpTableInfo && "No jump tables");
842 assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
843
844 StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
845 : DL.getInternalSymbolPrefix();
846 SmallString<60> Name;
848 << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
849 return Ctx.getOrCreateSymbol(Name);
850}
851
852/// Return a function-local symbol to represent the PIC base.
854 const DataLayout &DL = getDataLayout();
855 return Ctx.getOrCreateSymbol(Twine(DL.getInternalSymbolPrefix()) +
856 Twine(getFunctionNumber()) + "$pb");
857}
858
859/// \name Exception Handling
860/// \{
861
864 unsigned N = LandingPads.size();
865 for (unsigned i = 0; i < N; ++i) {
866 LandingPadInfo &LP = LandingPads[i];
867 if (LP.LandingPadBlock == LandingPad)
868 return LP;
869 }
870
871 LandingPads.push_back(LandingPadInfo(LandingPad));
872 return LandingPads[N];
873}
874
876 MCSymbol *BeginLabel, MCSymbol *EndLabel) {
878 LP.BeginLabels.push_back(BeginLabel);
879 LP.EndLabels.push_back(EndLabel);
880}
881
883 MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
885 LP.LandingPadLabel = LandingPadLabel;
886
888 LandingPad->getBasicBlock()->getFirstNonPHIIt();
889 if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
890 // If there's no typeid list specified, then "cleanup" is implicit.
891 // Otherwise, id 0 is reserved for the cleanup action.
892 if (LPI->isCleanup() && LPI->getNumClauses() != 0)
893 LP.TypeIds.push_back(0);
894
895 // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
896 // correct, but we need to do it this way because of how the DWARF EH
897 // emitter processes the clauses.
898 for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
899 Value *Val = LPI->getClause(I - 1);
900 if (LPI->isCatch(I - 1)) {
901 LP.TypeIds.push_back(
903 } else {
904 // Add filters in a list.
905 auto *CVal = cast<Constant>(Val);
906 SmallVector<unsigned, 4> FilterList;
907 for (const Use &U : CVal->operands())
908 FilterList.push_back(
909 getTypeIDFor(cast<GlobalValue>(U->stripPointerCasts())));
910
911 LP.TypeIds.push_back(getFilterIDFor(FilterList));
912 }
913 }
914
915 } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
916 for (unsigned I = CPI->arg_size(); I != 0; --I) {
917 auto *TypeInfo =
918 dyn_cast<GlobalValue>(CPI->getArgOperand(I - 1)->stripPointerCasts());
919 LP.TypeIds.push_back(getTypeIDFor(TypeInfo));
920 }
921
922 } else {
923 assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
924 }
925
926 return LandingPadLabel;
927}
928
930 ArrayRef<unsigned> Sites) {
931 LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
932}
933
935 for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
936 if (TypeInfos[i] == TI) return i + 1;
937
938 TypeInfos.push_back(TI);
939 return TypeInfos.size();
940}
941
943 // If the new filter coincides with the tail of an existing filter, then
944 // re-use the existing filter. Folding filters more than this requires
945 // re-ordering filters and/or their elements - probably not worth it.
946 for (unsigned i : FilterEnds) {
947 unsigned j = TyIds.size();
948
949 while (i && j)
950 if (FilterIds[--i] != TyIds[--j])
951 goto try_next;
952
953 if (!j)
954 // The new filter coincides with range [i, end) of the existing filter.
955 return -(1 + i);
956
957try_next:;
958 }
959
960 // Add the new filter.
961 int FilterID = -(1 + FilterIds.size());
962 FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
963 llvm::append_range(FilterIds, TyIds);
964 FilterEnds.push_back(FilterIds.size());
965 FilterIds.push_back(0); // terminator
966 return FilterID;
967}
968
970MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
971 assert(MI->isCandidateForAdditionalCallInfo() &&
972 "Call site info refers only to call (MI) candidates");
973
974 if (!Target.Options.EmitCallSiteInfo && !Target.Options.EmitCallGraphSection)
975 return CallSitesInfo.end();
976 return CallSitesInfo.find(MI);
977}
978
979/// Return the call machine instruction or find a call within bundle.
981 if (!MI->isBundle())
982 return MI;
983
984 for (const auto &BMI : make_range(getBundleStart(MI->getIterator()),
985 getBundleEnd(MI->getIterator())))
986 if (BMI.isCandidateForAdditionalCallInfo())
987 return &BMI;
988
989 llvm_unreachable("Unexpected bundle without a call site candidate");
990}
991
993 assert(MI->shouldUpdateAdditionalCallInfo() &&
994 "Call info refers only to call (MI) candidates or "
995 "candidates inside bundles");
996
997 const MachineInstr *CallMI = getCallInstr(MI);
998
999 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
1000 if (CSIt != CallSitesInfo.end())
1001 CallSitesInfo.erase(CSIt);
1002
1003 CalledGlobalsInfo.erase(CallMI);
1004}
1005
1007 const MachineInstr *New) {
1009 "Call info refers only to call (MI) candidates or "
1010 "candidates inside bundles");
1011
1012 if (!New->isCandidateForAdditionalCallInfo())
1013 return eraseAdditionalCallInfo(Old);
1014
1015 const MachineInstr *OldCallMI = getCallInstr(Old);
1016 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
1017 if (CSIt != CallSitesInfo.end()) {
1018 CallSiteInfo CSInfo = CSIt->second;
1019 CallSitesInfo[New] = std::move(CSInfo);
1020 }
1021
1022 CalledGlobalsMap::iterator CGIt = CalledGlobalsInfo.find(OldCallMI);
1023 if (CGIt != CalledGlobalsInfo.end()) {
1024 CalledGlobalInfo CGInfo = CGIt->second;
1025 CalledGlobalsInfo[New] = std::move(CGInfo);
1026 }
1027}
1028
1030 const MachineInstr *New) {
1032 "Call info refers only to call (MI) candidates or "
1033 "candidates inside bundles");
1034
1035 if (!New->isCandidateForAdditionalCallInfo())
1036 return eraseAdditionalCallInfo(Old);
1037
1038 const MachineInstr *OldCallMI = getCallInstr(Old);
1039 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
1040 if (CSIt != CallSitesInfo.end()) {
1041 CallSiteInfo CSInfo = std::move(CSIt->second);
1042 CallSitesInfo.erase(CSIt);
1043 CallSitesInfo[New] = std::move(CSInfo);
1044 }
1045
1046 CalledGlobalsMap::iterator CGIt = CalledGlobalsInfo.find(OldCallMI);
1047 if (CGIt != CalledGlobalsInfo.end()) {
1048 CalledGlobalInfo CGInfo = std::move(CGIt->second);
1049 CalledGlobalsInfo.erase(CGIt);
1050 CalledGlobalsInfo[New] = std::move(CGInfo);
1051 }
1052}
1053
1057
1060 unsigned Subreg) {
1061 // Catch any accidental self-loops.
1062 assert(A.first != B.first);
1063 // Don't allow any substitutions _from_ the memory operand number.
1064 assert(A.second != DebugOperandMemNumber);
1065
1066 DebugValueSubstitutions.push_back({A, B, Subreg});
1067}
1068
1070 MachineInstr &New,
1071 unsigned MaxOperand) {
1072 // If the Old instruction wasn't tracked at all, there is no work to do.
1073 unsigned OldInstrNum = Old.peekDebugInstrNum();
1074 if (!OldInstrNum)
1075 return;
1076
1077 // Iterate over all operands looking for defs to create substitutions for.
1078 // Avoid creating new instr numbers unless we create a new substitution.
1079 // While this has no functional effect, it risks confusing someone reading
1080 // MIR output.
1081 // Examine all the operands, or the first N specified by the caller.
1082 MaxOperand = std::min(MaxOperand, Old.getNumOperands());
1083 for (unsigned int I = 0; I < MaxOperand; ++I) {
1084 const auto &OldMO = Old.getOperand(I);
1085 auto &NewMO = New.getOperand(I);
1086 (void)NewMO;
1087
1088 if (!OldMO.isReg() || !OldMO.isDef())
1089 continue;
1090 assert(NewMO.isDef());
1091
1092 unsigned NewInstrNum = New.getDebugInstrNum();
1093 makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
1094 std::make_pair(NewInstrNum, I));
1095 }
1096}
1097
 1102
// Caching wrapper around salvageCopySSAImpl: the result of salvaging a
// copy-like instruction is memoised in DbgPHICache, keyed on the copy's
// destination register, so repeated debug references through the same copy
// are only traced once.
// NOTE(review): the signature lines were dropped by the source extraction;
// per the declaration it is
//   MachineFunction::DebugInstrOperandPair MachineFunction::salvageCopySSA(
//       MachineInstr &MI, DenseMap<Register, DebugInstrOperandPair>
//       &DbgPHICache)
// and TII is presumably a local fetched from the subtarget in the dropped
// lines — confirm against the header.
 1103 // Check whether this copy-like instruction has already been salvaged into
 1104 // an operand pair.
 1105 Register Dest;
 1106 if (auto CopyDstSrc = TII.isCopyLikeInstr(MI)) {
 1107 Dest = CopyDstSrc->Destination->getReg();
 1108 } else {
// Only other instruction kind accepted here: SUBREG_TO_REG, whose operand 0
// is the destination register.
 1109 assert(MI.isSubregToReg());
 1110 Dest = MI.getOperand(0).getReg();
 1111 }
 1112
 1113 auto CacheIt = DbgPHICache.find(Dest);
 1114 if (CacheIt != DbgPHICache.end())
 1115 return CacheIt->second;
 1116
 1117 // Calculate the instruction number to use, or install a DBG_PHI.
 1118 auto OperandPair = salvageCopySSAImpl(MI);
 1119 DbgPHICache.insert({Dest, OperandPair});
 1120 return OperandPair;
 1121}
1122
 1128
// Trace the value read by a copy-like instruction back to its ultimate
// defining instruction, returning an <instruction number, operand number>
// pair identifying that def (possibly via newly created subregister
// substitutions, or via a freshly inserted DBG_PHI for physreg live-ins).
// NOTE(review): the signature lines were dropped by the source extraction;
// per the declaration it is
//   MachineFunction::DebugInstrOperandPair
//   MachineFunction::salvageCopySSAImpl(MachineInstr &MI)
// and the locals TII / TRI / MRI referenced below are presumably fetched
// from the subtarget / register info in the dropped lines — confirm.
 1129 // Chase the value read by a copy-like instruction back to the instruction
 1130 // that ultimately _defines_ that value. This may pass:
 1131 // * Through multiple intermediate copies, including subregister moves /
 1132 // copies,
 1133 // * Copies from physical registers that must then be traced back to the
 1134 // defining instruction,
 1135 // * Or, physical registers may be live-in to (only) the entry block, which
 1136 // requires a DBG_PHI to be created.
 1137 // We can pursue this problem in that order: trace back through copies,
 1138 // optionally through a physical register, to a defining instruction. We
 1139 // should never move from physreg to vreg. As we're still in SSA form, no need
 1140 // to worry about partial definitions of registers.
 1141
 1142 // Helper lambda to interpret a copy-like instruction. Takes instruction,
 1143 // returns the register read and any subregister identifying which part is
 1144 // read.
 1145 auto GetRegAndSubreg =
 1146 [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {
 1147 Register NewReg, OldReg;
 1148 unsigned SubReg;
 1149 if (Cpy.isCopy()) {
 1150 OldReg = Cpy.getOperand(0).getReg();
 1151 NewReg = Cpy.getOperand(1).getReg();
 1152 SubReg = Cpy.getOperand(1).getSubReg();
 1153 } else if (Cpy.isSubregToReg()) {
// SUBREG_TO_REG: operand 1 is the source register, operand 2 an immediate
// naming the subregister being written.
 1154 OldReg = Cpy.getOperand(0).getReg();
 1155 NewReg = Cpy.getOperand(1).getReg();
 1156 SubReg = Cpy.getOperand(2).getImm();
 1157 } else {
// Otherwise defer to the target's notion of a copy-like instruction.
 1158 auto CopyDetails = *TII.isCopyInstr(Cpy);
 1159 const MachineOperand &Src = *CopyDetails.Source;
 1160 const MachineOperand &Dest = *CopyDetails.Destination;
 1161 OldReg = Dest.getReg();
 1162 NewReg = Src.getReg();
 1163 SubReg = Src.getSubReg();
 1164 }
 1165
 1166 return {NewReg, SubReg};
 1167 };
 1168
 1169 // First seek either the defining instruction, or a copy from a physreg.
 1170 // During search, the current state is the current copy instruction, and which
 1171 // register we've read. Accumulate qualifying subregisters into SubregsSeen;
 1172 // deal with those later.
 1173 auto State = GetRegAndSubreg(MI);
 1174 auto CurInst = MI.getIterator();
 1175 SmallVector<unsigned, 4> SubregsSeen;
 1176 while (true) {
 1177 // If we've found a copy from a physreg, first portion of search is over.
 1178 if (!State.first.isVirtual())
 1179 break;
 1180
 1181 // Record any subregister qualifier.
 1182 if (State.second)
 1183 SubregsSeen.push_back(State.second);
 1184
// SSA guarantees a single def for each virtual register.
 1185 assert(MRI.hasOneDef(State.first));
 1186 MachineInstr &Inst = *MRI.def_begin(State.first)->getParent();
 1187 CurInst = Inst.getIterator();
 1188
 1189 // Any non-copy instruction is the defining instruction we're seeking.
 1190 if (!Inst.isCopyLike() && !TII.isCopyLikeInstr(Inst))
 1191 break;
 1192 State = GetRegAndSubreg(Inst);
// NOTE(review): stray ';' after the while-loop's closing brace — harmless,
// but could be removed in a code-changing pass.
 1193 };
 1194
 1195 // Helper lambda to apply additional subregister substitutions to a known
 1196 // instruction/operand pair. Adds new (fake) substitutions so that we can
 1197 // record the subregister. FIXME: this isn't very space efficient if multiple
 1198 // values are tracked back through the same copies; cache something later.
 1199 auto ApplySubregisters =
// NOTE(review): the lambda's parameter line (line 1200, presumably
// "[&](DebugInstrOperandPair P) -> DebugInstrOperandPair {") was dropped by
// the source extraction — confirm against upstream.
 1201 for (unsigned Subreg : reverse(SubregsSeen)) {
 1202 // Fetch a new instruction number, not attached to an actual instruction.
 1203 unsigned NewInstrNumber = getNewDebugInstrNum();
 1204 // Add a substitution from the "new" number to the known one, with a
 1205 // qualifying subreg.
 1206 makeDebugValueSubstitution({NewInstrNumber, 0}, P, Subreg);
 1207 // Return the new number; to find the underlying value, consumers need to
 1208 // deal with the qualifying subreg.
 1209 P = {NewInstrNumber, 0};
 1210 }
 1211 return P;
 1212 };
 1213
 1214 // If we managed to find the defining instruction after COPYs, return an
 1215 // instruction / operand pair after adding subregister qualifiers.
 1216 if (State.first.isVirtual()) {
 1217 // Virtual register def -- we can just look up where this happens.
 1218 MachineInstr *Inst = MRI.def_begin(State.first)->getParent();
 1219 for (auto &MO : Inst->all_defs()) {
 1220 if (MO.getReg() != State.first)
 1221 continue;
 1222 return ApplySubregisters({Inst->getDebugInstrNum(), MO.getOperandNo()});
 1223 }
 1224
 1225 llvm_unreachable("Vreg def with no corresponding operand?");
 1226 }
 1227
 1228 // Our search ended in a copy from a physreg: walk back up the function
 1229 // looking for whatever defines the physreg.
 1230 assert(CurInst->isCopyLike() || TII.isCopyInstr(*CurInst));
 1231 State = GetRegAndSubreg(*CurInst);
 1232 Register RegToSeek = State.first;
 1233
 1234 auto RMII = CurInst->getReverseIterator();
 1235 auto PrevInstrs = make_range(RMII, CurInst->getParent()->instr_rend());
 1236 for (auto &ToExamine : PrevInstrs) {
 1237 for (auto &MO : ToExamine.all_defs()) {
 1238 // Test for operand that defines something aliasing RegToSeek.
 1239 if (!TRI.regsOverlap(RegToSeek, MO.getReg()))
 1240 continue;
 1241
 1242 return ApplySubregisters(
 1243 {ToExamine.getDebugInstrNum(), MO.getOperandNo()});
 1244 }
 1245 }
 1246
 1247 MachineBasicBlock &InsertBB = *CurInst->getParent();
 1248
 1249 // We reached the start of the block before finding a defining instruction.
 1250 // There are numerous scenarios where this can happen:
 1251 // * Constant physical registers,
 1252 // * Several intrinsics that allow LLVM-IR to read arbitrary registers,
 1253 // * Arguments in the entry block,
 1254 // * Exception handling landing pads.
 1255 // Validating all of them is too difficult, so just insert a DBG_PHI reading
 1256 // the variable value at this position, rather than checking it makes sense.
 1257
 1258 // Create DBG_PHI for specified physreg.
 1259 auto Builder = BuildMI(InsertBB, InsertBB.getFirstNonPHI(), DebugLoc(),
 1260 TII.get(TargetOpcode::DBG_PHI));
 1261 Builder.addReg(State.first);
 1262 unsigned NewNum = getNewDebugInstrNum();
 1263 Builder.addImm(NewNum);
 1264 return ApplySubregisters({NewNum, 0u});
 1265}
1266
// Finalise partially-emitted DBG_INSTR_REFs: rewrite register debug operands
// into <instruction, operand> references, chasing through copies via
// salvageCopySSA, and downgrade references to dead/undefined vregs to undef
// DBG_VALUE_LISTs.
// NOTE(review): the signature line ("void
// MachineFunction::finalizeDebugInstrRefs() {") was dropped by the source
// extraction.
 1268 auto *TII = getSubtarget().getInstrInfo();
 1269
// Turns an instruction into an empty (undef) variable location, preserving
// the variable/expression but discarding the operands.
 1270 auto MakeUndefDbgValue = [&](MachineInstr &MI) {
 1271 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_VALUE_LIST);
 1272 MI.setDesc(RefII);
 1273 MI.setDebugValueUndef();
 1274 };
 1275
// NOTE(review): line 1276 was dropped by the source extraction — given the
// salvageCopySSA call below, it is presumably the declaration of the
// ArgDbgPHIs cache (DenseMap<Register, DebugInstrOperandPair> ArgDbgPHIs;)
// — confirm against upstream.
 1277 for (auto &MBB : *this) {
 1278 for (auto &MI : MBB) {
 1279 if (!MI.isDebugRef())
 1280 continue;
 1281
 1282 bool IsValidRef = true;
 1283
 1284 for (MachineOperand &MO : MI.debug_operands()) {
 1285 if (!MO.isReg())
 1286 continue;
 1287
 1288 Register Reg = MO.getReg();
 1289
 1290 // Some vregs can be deleted as redundant in the meantime. Mark those
 1291 // as DBG_VALUE $noreg. Additionally, some normal instructions are
 1292 // quickly deleted, leaving dangling references to vregs with no def.
 1293 if (Reg == 0 || !RegInfo->hasOneDef(Reg)) {
 1294 IsValidRef = false;
 1295 break;
 1296 }
 1297
 1298 assert(Reg.isVirtual());
 1299 MachineInstr &DefMI = *RegInfo->def_instr_begin(Reg);
 1300
 1301 // If we've found a copy-like instruction, follow it back to the
 1302 // instruction that defines the source value, see salvageCopySSA docs
 1303 // for why this is important.
 1304 if (DefMI.isCopyLike() || TII->isCopyInstr(DefMI)) {
 1305 auto Result = salvageCopySSA(DefMI, ArgDbgPHIs);
 1306 MO.ChangeToDbgInstrRef(Result.first, Result.second);
 1307 } else {
 1308 // Otherwise, identify the operand number that the VReg refers to.
 1309 unsigned OperandIdx = 0;
 1310 for (const auto &DefMO : DefMI.operands()) {
 1311 if (DefMO.isReg() && DefMO.isDef() && DefMO.getReg() == Reg)
 1312 break;
 1313 ++OperandIdx;
 1314 }
 1315 assert(OperandIdx < DefMI.getNumOperands());
 1316
 1317 // Morph this instr ref to point at the given instruction and operand.
 1318 unsigned ID = DefMI.getDebugInstrNum();
 1319 MO.ChangeToDbgInstrRef(ID, OperandIdx);
 1320 }
 1321 }
 1322
 1323 if (!IsValidRef)
 1324 MakeUndefDbgValue(MI);
 1325 }
 1326 }
 1327}
1328
1330 // Disable instr-ref at -O0: it's very slow (in compile time). We can still
1331 // have optimized code inlined into this unoptimized code, however with
1332 // fewer and less aggressive optimizations happening, coverage and accuracy
1333 // should not suffer.
1334 if (getTarget().getOptLevel() == CodeGenOptLevel::None)
1335 return false;
1336
1337 // Don't use instr-ref if this function is marked optnone.
1338 if (F.hasFnAttribute(Attribute::OptimizeNone))
1339 return false;
1340
1341 if (llvm::debuginfoShouldUseDebugInstrRef(getTarget().getTargetTriple()))
1342 return true;
1343
1344 return false;
1345}
1346
// Returns true if this function's variable locations are currently tracked
// with instruction referencing (the UseDebugInstrRef flag).
// NOTE(review): the signature line ("bool MachineFunction::useDebugInstrRef()
// const {") was dropped by the source extraction.
 1348 return UseDebugInstrRef;
 1349}
1350
1354
1355// Use one million as a high / reserved number.
1356const unsigned MachineFunction::DebugOperandMemNumber = 1000000;
1357
1358/// \}
1359
1360//===----------------------------------------------------------------------===//
1361// MachineJumpTableInfo implementation
1362//===----------------------------------------------------------------------===//
1363
1365 const std::vector<MachineBasicBlock *> &MBBs)
1367
1368/// Return the size of each entry in the jump table.
1370 // The size of a jump table entry is 4 bytes unless the entry is just the
1371 // address of a block, in which case it is the pointer size.
1372 switch (getEntryKind()) {
1374 return TD.getPointerSize();
1377 return 8;
1381 return 4;
1383 return 0;
1384 }
1385 llvm_unreachable("Unknown jump table encoding!");
1386}
1387
1388/// Return the alignment of each entry in the jump table.
1390 // The alignment of a jump table entry is the alignment of int32 unless the
1391 // entry is just the address of a block, in which case it is the pointer
1392 // alignment.
1393 switch (getEntryKind()) {
1395 return TD.getPointerABIAlignment(0).value();
1398 return TD.getABIIntegerTypeAlignment(64).value();
1402 return TD.getABIIntegerTypeAlignment(32).value();
1404 return 1;
1405 }
1406 llvm_unreachable("Unknown jump table encoding!");
1407}
1408
1409/// Create a new jump table entry in the jump table info.
1411 const std::vector<MachineBasicBlock*> &DestBBs) {
1412 assert(!DestBBs.empty() && "Cannot create an empty jump table!");
1413 JumpTables.push_back(MachineJumpTableEntry(DestBBs));
1414 return JumpTables.size()-1;
1415}
1416
1418 size_t JTI, MachineFunctionDataHotness Hotness) {
1419 assert(JTI < JumpTables.size() && "Invalid JTI!");
1420 // Record the largest hotness value.
1421 if (Hotness <= JumpTables[JTI].Hotness)
1422 return false;
1423
1424 JumpTables[JTI].Hotness = Hotness;
1425 return true;
1426}
1427
1428/// If Old is the target of any jump tables, update the jump tables to branch
1429/// to New instead.
1431 MachineBasicBlock *New) {
1432 assert(Old != New && "Not making a change?");
1433 bool MadeChange = false;
1434 for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
1435 ReplaceMBBInJumpTable(i, Old, New);
1436 return MadeChange;
1437}
1438
1439/// If MBB is present in any jump tables, remove it.
1441 bool MadeChange = false;
1442 for (MachineJumpTableEntry &JTE : JumpTables) {
1443 auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
1444 MadeChange |= (removeBeginItr != JTE.MBBs.end());
1445 JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
1446 }
1447 return MadeChange;
1448}
1449
1450/// If Old is a target of the jump tables, update the jump table to branch to
1451/// New instead.
1453 MachineBasicBlock *Old,
1454 MachineBasicBlock *New) {
1455 assert(Old != New && "Not making a change?");
1456 bool MadeChange = false;
1457 MachineJumpTableEntry &JTE = JumpTables[Idx];
1458 for (MachineBasicBlock *&MBB : JTE.MBBs)
1459 if (MBB == Old) {
1460 MBB = New;
1461 MadeChange = true;
1462 }
1463 return MadeChange;
1464}
1465
1467 if (JumpTables.empty()) return;
1468
1469 OS << "Jump Tables:\n";
1470
1471 for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
1472 OS << printJumpTableEntryReference(i) << ':';
1473 for (const MachineBasicBlock *MBB : JumpTables[i].MBBs)
1474 OS << ' ' << printMBBReference(*MBB);
1475 OS << '\n';
1476 }
1477
1478 OS << '\n';
1479}
1480
1481#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1483#endif
1484
// Produce a deferred-printing object that renders a MIR-style
// "%jump-table.N" reference for jump table index Idx.
// NOTE(review): the signature line (per the doxygen index, "Printable
// llvm::printJumpTableEntryReference(unsigned Idx)") was dropped by the
// source extraction; only the body survives below.
 1486 return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
 1487}
1488
1489//===----------------------------------------------------------------------===//
1490// MachineConstantPool implementation
1491//===----------------------------------------------------------------------===//
1492
// Out-of-line virtual method definition so that MachineConstantPoolValue's
// vtable has a "home" in this translation unit (the usual LLVM anchor()
// idiom).
 1493 void MachineConstantPoolValue::anchor() {}
1494
1496 return DL.getTypeAllocSize(Ty);
1497}
1498
1501 return Val.MachineCPVal->getSizeInBytes(DL);
1502 return DL.getTypeAllocSize(Val.ConstVal->getType());
1503}
1504
1507 return true;
1508 return Val.ConstVal->needsDynamicRelocation();
1509}
1510
1513 if (needsRelocation())
1515 switch (getSizeInBytes(*DL)) {
1516 case 4:
1518 case 8:
1520 case 16:
1522 case 32:
1524 default:
1525 return SectionKind::getReadOnly();
1526 }
1527}
1528
1530 // A constant may be a member of both Constants and MachineCPVsSharingEntries,
1531 // so keep track of which we've deleted to avoid double deletions.
1533 for (const MachineConstantPoolEntry &C : Constants)
1534 if (C.isMachineConstantPoolEntry()) {
1535 Deleted.insert(C.Val.MachineCPVal);
1536 delete C.Val.MachineCPVal;
1537 }
1538 for (MachineConstantPoolValue *CPV : MachineCPVsSharingEntries) {
1539 if (Deleted.count(CPV) == 0)
1540 delete CPV;
1541 }
1542}
1543
1544/// Test whether the given two constants can be allocated the same constant pool
1545/// entry referenced by \param A.
1546static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
1547 const DataLayout &DL) {
1548 // Handle the trivial case quickly.
1549 if (A == B) return true;
1550
1551 // If they have the same type but weren't the same constant, quickly
1552 // reject them.
1553 if (A->getType() == B->getType()) return false;
1554
1555 // We can't handle structs or arrays.
1556 if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
1557 isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
1558 return false;
1559
1560 // For now, only support constants with the same size.
1561 uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
1562 if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
1563 return false;
1564
1565 bool ContainsUndefOrPoisonA = A->containsUndefOrPoisonElement();
1566
1567 Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
1568
1569 // Try constant folding a bitcast of both instructions to an integer. If we
1570 // get two identical ConstantInt's, then we are good to share them. We use
1571 // the constant folding APIs to do this so that we get the benefit of
1572 // DataLayout.
1573 if (isa<PointerType>(A->getType()))
1574 A = ConstantFoldCastOperand(Instruction::PtrToInt,
1575 const_cast<Constant *>(A), IntTy, DL);
1576 else if (A->getType() != IntTy)
1577 A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
1578 IntTy, DL);
1579 if (isa<PointerType>(B->getType()))
1580 B = ConstantFoldCastOperand(Instruction::PtrToInt,
1581 const_cast<Constant *>(B), IntTy, DL);
1582 else if (B->getType() != IntTy)
1583 B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
1584 IntTy, DL);
1585
1586 if (A != B)
1587 return false;
1588
1589 // Constants only safely match if A doesn't contain undef/poison.
1590 // As we'll be reusing A, it doesn't matter if B contain undef/poison.
1591 // TODO: Handle cases where A and B have the same undef/poison elements.
1592 // TODO: Merge A and B with mismatching undef/poison elements.
1593 return !ContainsUndefOrPoisonA;
1594}
1595
1596/// Create a new entry in the constant pool or return an existing one.
1597/// User must specify the log2 of the minimum required alignment for the object.
1599 Align Alignment) {
1600 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1601
1602 // Check to see if we already have this constant.
1603 //
1604 // FIXME, this could be made much more efficient for large constant pools.
1605 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
1606 if (!Constants[i].isMachineConstantPoolEntry() &&
1607 CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
1608 if (Constants[i].getAlign() < Alignment)
1609 Constants[i].Alignment = Alignment;
1610 return i;
1611 }
1612
1613 Constants.push_back(MachineConstantPoolEntry(C, Alignment));
1614 return Constants.size()-1;
1615}
1616
1618 Align Alignment) {
1619 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1620
1621 // Check to see if we already have this constant.
1622 //
1623 // FIXME, this could be made much more efficient for large constant pools.
1624 int Idx = V->getExistingMachineCPValue(this, Alignment);
1625 if (Idx != -1) {
1626 MachineCPVsSharingEntries.insert(V);
1627 return (unsigned)Idx;
1628 }
1629
1630 Constants.push_back(MachineConstantPoolEntry(V, Alignment));
1631 return Constants.size()-1;
1632}
1633
1635 if (Constants.empty()) return;
1636
1637 OS << "Constant Pool:\n";
1638 for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
1639 OS << " cp#" << i << ": ";
1640 if (Constants[i].isMachineConstantPoolEntry())
1641 Constants[i].Val.MachineCPVal->print(OS);
1642 else
1643 Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
1644 OS << ", align=" << Constants[i].getAlign().value();
1645 OS << "\n";
1646 }
1647}
1648
1649//===----------------------------------------------------------------------===//
1650// Template specialization for MachineFunction implementation of
1651// ProfileSummaryInfo::getEntryCount().
1652//===----------------------------------------------------------------------===//
// For a MachineFunction, the profile entry count is simply that of the IR
// Function it was generated from.
 1653 template <>
 1654 std::optional<Function::ProfileCount>
 1655 ProfileSummaryInfo::getEntryCount<llvm::MachineFunction>(
 1656 const llvm::MachineFunction *F) const {
 1657 return F->getFunction().getEntryCount();
 1658 }
1659
1660#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1662#endif
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition Compiler.h:661
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
static FramePointerKind getFramePointerPolicy(const Function &F)
static cl::opt< unsigned > AlignAllFunctions("align-all-functions", cl::desc("Force the alignment of all functions in log2 format (e.g. 4 " "means align on 16B boundaries)."), cl::init(0), cl::Hidden)
static const MachineInstr * getCallInstr(const MachineInstr *MI)
Return the call machine instruction or find a call within bundle.
static Align getFnStackAlignment(const TargetSubtargetInfo &STI, const Function &F)
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B, const DataLayout &DL)
Test whether the given two constants can be allocated the same constant pool entry referenced by.
void setUnsafeStackSize(const Function &F, MachineFrameInfo &FrameInfo)
static const char * getPropertyName(MachineFunctionProperties::Property Prop)
Register const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
#define P(N)
Basic Register Allocator
static bool isSimple(Instruction *I)
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallString class.
This file defines the SmallVector class.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static const int BlockSize
Definition TarWriter.cpp:33
This file describes how to lower LLVM code to machine code.
void print(OutputBuffer &OB) const
void clear(AllocatorType &Allocator)
Release all the tracked allocations to the allocator.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
Definition BasicBlock.h:171
unsigned size_type
Definition BitVector.h:115
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Align getABIIntegerTypeAlignment(unsigned BitWidth) const
Returns the minimum ABI-required alignment for an integer type of the specified bitwidth.
Definition DataLayout.h:634
LLVM_ABI unsigned getPointerSize(unsigned AS=0) const
The pointer representation size in bytes, rounded up to a whole number of bytes.
LLVM_ABI Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
A debug info location.
Definition DebugLoc.h:123
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition DenseMap.h:74
iterator end()
Definition DenseMap.h:81
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
Context object for machine code objects.
Definition MCContext.h:83
Describe properties that are true of each instruction in the target description file.
unsigned getNumRegs() const
Return the number of registers this target has (useful for sizing arrays holding per register informa...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1442
Tracking metadata reference owned by Metadata.
Definition Metadata.h:902
A single uniqued string.
Definition Metadata.h:722
LLVM_ABI StringRef getString() const
Definition Metadata.cpp:632
void setIsEndSection(bool V=true)
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
MBBSectionID getSectionID() const
Returns the section ID of this basic block.
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsBeginSection(bool V=true)
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::(anonymous union) Val
The constant itself.
bool needsRelocation() const
This method classifies the entry according to whether or not it may generate a relocation entry.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
unsigned getSizeInBytes(const DataLayout &DL) const
SectionKind getSectionKind(const DataLayout *DL) const
Abstract base class for all machine specific constantpool value subclasses.
virtual unsigned getSizeInBytes(const DataLayout &DL) const
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
void dump() const
dump - Call print(cerr) to be called from the debugger.
void print(raw_ostream &OS) const
print - Used by the MachineFunction printer to print information about constant pool objects.
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setFramePointerPolicy(FramePointerKind Kind)
LLVM_ABI void print(raw_ostream &OS) const
Print the MachineFunctionProperties in human-readable form.
MachineFunctionProperties & reset(Property P)
virtual void MF_HandleRemoval(MachineInstr &MI)=0
Callback before a removal. This should not modify the MI directly.
virtual void MF_HandleInsertion(MachineInstr &MI)=0
Callback after an insertion. This should not modify the MI directly.
int getFilterIDFor(ArrayRef< unsigned > TyIds)
Return the id of the filter encoded by TyIds. This is function wide.
bool UseDebugInstrRef
Flag for whether this function contains DBG_VALUEs (false) or DBG_INSTR_REF (true).
void moveAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)
Move the call site info from Old to \New call site info.
std::pair< unsigned, unsigned > DebugInstrOperandPair
Pair of instruction number and operand number.
unsigned addFrameInst(const MCCFIInstruction &Inst)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
SmallVector< DebugSubstitution, 8 > DebugValueSubstitutions
Debug value substitutions: a collection of DebugSubstitution objects, recording changes in where a va...
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
void viewCFGOnly() const
viewCFGOnly - This function is meant for use from the debugger.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
void substituteDebugValuesForInst(const MachineInstr &Old, MachineInstr &New, unsigned MaxOperand=UINT_MAX)
Create substitutions for any tracked values in Old, to point at New.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist,...
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void dump() const
dump - Print the current MachineFunction to cerr, useful for debugger use.
void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair, unsigned SubReg=0)
Create a substitution between one <instr,operand> value to a different, new value.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFunction(Function &F, const TargetMachine &Target, const TargetSubtargetInfo &STI, MCContext &Ctx, unsigned FunctionNum)
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0, MDNode *MMRAs=nullptr, Value *DS=nullptr)
Allocate and construct an extra info structure for a MachineInstr.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
void finalizeDebugInstrRefs()
Finalise any partially emitted debug instructions.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Dellocate an array of MachineOperands and recycle the memory.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
void initTargetMachineFunctionInfo(const TargetSubtargetInfo &STI)
Initialize the target specific MachineFunctionInfo.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MCSymbol * getJTISymbol(unsigned JTI, MCContext &Ctx, bool isLinkerPrivate=false) const
getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef< unsigned > Sites)
Map the landing pad's EH symbol to the call site indexes.
void setUseDebugInstrRef(bool UseInstrRef)
Set whether this function will use instruction referencing or not.
LandingPadInfo & getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad)
Find or create an LandingPadInfo for the specified MachineBasicBlock.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new landing pad, and extract the exception handling information from the landingpad instruction...
unsigned DebugInstrNumberingCount
A count of how many instructions in the function have had numbers assigned to them.
void deleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
Align getAlignment() const
getAlignment - Return the alignment of the function.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
static const unsigned int DebugOperandMemNumber
A reserved operand number representing the instructions memory operand, for instructions that have a ...
Function & getFunction()
Return the LLVM function that this machine code represents.
Align getPreferredAlignment() const
Returns the preferred alignment which comes from the function attributes (optsize,...
DebugInstrOperandPair salvageCopySSAImpl(MachineInstr &MI)
const MachineBasicBlock & back() const
BasicBlockListType::iterator iterator
void setDebugInstrNumberingCount(unsigned Num)
Set value of DebugInstrNumberingCount field.
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
void viewCFG() const
viewCFG - This function is meant for use from the debugger.
bool shouldUseDebugInstrRef() const
Determine whether, in the current machine configuration, we should use instruction referencing or not...
const MachineFunctionProperties & getProperties() const
Get the function properties.
void eraseAdditionalCallInfo(const MachineInstr *MI)
Following functions update call site info.
void RenumberBlocks(MachineBasicBlock *MBBFrom=nullptr)
RenumberBlocks - This discards all of the MachineBasicBlock numbers and recomputes them.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
int64_t estimateFunctionSizeInBytes()
Return an estimate of the function's code size, taking into account block and function alignment.
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void copyAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)
Copy the call site info from Old to New.
VariableDbgInfoMapTy VariableDbgInfos
void assignBeginEndSections()
Assign IsBeginSection IsEndSection fields for basic blocks in this function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
DebugInstrOperandPair salvageCopySSA(MachineInstr &MI, DenseMap< Register, DebugInstrOperandPair > &DbgPHICache)
Find the underlying defining instruction / operand for a COPY instruction while in SSA form.
Representation of each machine instruction.
LLVM_ABI void bundleWithPred()
Bundle this instruction with its predecessor.
bool isCopyLike() const
Return true if the instruction behaves like a copy.
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
unsigned getNumOperands() const
Returns the total number of operands.
unsigned peekDebugInstrNum() const
Examine the instruction number of this MachineInstr.
LLVM_ABI unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI bool shouldUpdateAdditionalCallInfo() const
Return true if copying, moving, or erasing this instruction requires updating additional call info (s...
LLVM_ABI bool RemoveMBBFromJumpTables(MachineBasicBlock *MBB)
RemoveMBBFromJumpTables - If MBB is present in any jump tables, remove it.
LLVM_ABI bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New)
ReplaceMBBInJumpTables - If Old is the target of any jump tables, update the jump tables to branch to...
LLVM_ABI void print(raw_ostream &OS) const
print - Used by the MachineFunction printer to print information about jump tables.
LLVM_ABI unsigned getEntrySize(const DataLayout &TD) const
getEntrySize - Return the size of each entry in the jump table.
LLVM_ABI unsigned createJumpTableIndex(const std::vector< MachineBasicBlock * > &DestBBs)
createJumpTableIndex - Create a new jump table.
LLVM_ABI void dump() const
dump - Call to stderr.
LLVM_ABI bool ReplaceMBBInJumpTable(unsigned Idx, MachineBasicBlock *Old, MachineBasicBlock *New)
ReplaceMBBInJumpTable - If Old is a target of the jump tables, update the jump table to branch to New...
LLVM_ABI bool updateJumpTableEntryHotness(size_t JTI, MachineFunctionDataHotness Hotness)
JTEntryKind
JTEntryKind - This enum indicates how each entry of the jump table is represented and emitted.
@ EK_GPRel32BlockAddress
EK_GPRel32BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
@ EK_LabelDifference64
EK_LabelDifference64 - Each entry is the address of the block minus the address of the jump table.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
@ EK_GPRel64BlockAddress
EK_GPRel64BlockAddress - Each entry is an address of block, encoded with a relocation as gp-relative,...
LLVM_ABI unsigned getEntryAlignment(const DataLayout &TD) const
getEntryAlignment - Return the alignment of each entry in the jump table.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
AtomicOrdering getFailureOrdering() const
For cmpxchg atomic operations, return the atomic ordering requirements when store does not occur.
const PseudoSourceValue * getPseudoValue() const
const MDNode * getRanges() const
Return the range tag for the memory reference.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID for this memory operation.
Flags
Flags values. These may be or'd together.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
Align getBaseAlign() const
Return the minimum known alignment in bytes of the base address, without the offset.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
MachineOperand class - Representation of each machine instruction operand.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
def_iterator def_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
std::vector< std::pair< MCRegister, Register > >::const_iterator livein_iterator
bool hasOneDef(Register RegNo) const
Return true if there is exactly one operand defining the specified register.
LLVM_ABI Register getLiveInVirtReg(MCRegister PReg) const
getLiveInVirtReg - If PReg is a live-in physical register, return the corresponding live-in virtual r...
const TargetRegisterInfo * getTargetRegisterInfo() const
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Simple wrapper around std::function<void(raw_ostream&)>.
Definition Printable.h:38
Wrapper class representing virtual and physical registers.
Definition Register.h:20
SectionKind - This is a simple POD value that classifies the properties of a section.
Definition SectionKind.h:22
static SectionKind getMergeableConst4()
static SectionKind getReadOnlyWithRel()
static SectionKind getMergeableConst8()
static SectionKind getMergeableConst16()
static SectionKind getReadOnly()
static SectionKind getMergeableConst32()
SlotIndexes pass.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
bool isStackRealignable() const
isStackRealignable - This method returns whether the stack can be realigned.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Align getMinFunctionAlignment() const
Return the minimum function alignment.
Primary interface to the complete machine description for the target machine.
TargetOptions Options
unsigned ForceDwarfFrameSection
Emit DWARF debug frame section.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
Target - Wrapper for Target specific information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:314
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
self_iterator getIterator()
Definition ilist_node.h:123
iterator erase(iterator where)
Definition ilist.h:204
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
uint64_t MD5Hash(const FunctionId &Obj)
Definition FunctionId.h:167
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
MachineBasicBlock::instr_iterator getBundleStart(MachineBasicBlock::instr_iterator I)
Returns an iterator to the first instruction in the bundle containing I.
FramePointerKind
Definition CodeGen.h:118
MaybeAlign getAlign(const CallInst &I, unsigned Index)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI Printable printJumpTableEntryReference(unsigned Idx)
Prints a jump table entry reference.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
MachineFunctionDataHotness
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto formatv(bool Validate, const char *Fmt, Ts &&...Vals)
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
MachineBasicBlock::instr_iterator getBundleEnd(MachineBasicBlock::instr_iterator I)
Returns an iterator pointing beyond the bundle containing I.
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
DWARFExpression::Operation Op
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1885
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
bool debuginfoShouldUseDebugInstrRef(const Triple &T)
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:763
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
std::string getNodeLabel(const MachineBasicBlock *Node, const MachineFunction *Graph)
static std::string getGraphName(const MachineFunction *F)
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot...
Represent subnormal handling kind for floating point instruction inputs and outputs.
This structure is used to retain landing pad info for the current function.
SmallVector< MCSymbol *, 1 > EndLabels
MachineBasicBlock * LandingPadBlock
SmallVector< MCSymbol *, 1 > BeginLabels
std::vector< int > TypeIds
SmallVector< ConstantInt *, 4 > CalleeTypeIds
Callee type ids.
MDNode * CallTarget
'call_target' metadata for the DISubprogram.
MachineJumpTableEntry - One jump table in the jump table info.
LLVM_ABI MachineJumpTableEntry(const std::vector< MachineBasicBlock * > &M)
std::vector< MachineBasicBlock * > MBBs
MBBs - The vector of basic blocks from which to create the jump table.
MachineFunctionDataHotness Hotness
The hotness of MJTE is inferred from the hotness of the source basic block(s) that reference it.
This class contains a discriminated union of information about pointers in memory operands,...
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
static void deleteNode(NodeTy *V)
Definition ilist.h:42