//===-- RISCVZilsdOptimizer.cpp - RISC-V Zilsd Load/Store Optimizer ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load/store optimizations for the
// RISC-V Zilsd extension. It combines pairs of 32-bit load/store instructions
// into single 64-bit LD/SD instructions when possible.
//
// The pass runs in two phases:
// 1. Pre-allocation: Reschedules loads/stores to bring consecutive memory
//    accesses closer together and forms LD/SD pairs with register hints.
// 2. Post-allocation: Fixes invalid LD/SD instructions if register allocation
//    didn't provide suitable consecutive registers.
//
// Note: the second phase is integrated into RISCVLoadStoreOptimizer.
//
//===----------------------------------------------------------------------===//
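//
// As a rough illustration (register names are placeholders; the final
// assignment is up to the allocator), the pre-allocation phase turns a
// pattern such as
//
//   lw %a, 8(%base)
//   ...
//   lw %b, 12(%base)
//
// into a single paired pseudo
//
//   PseudoLD_RV32_OPT %a, %b, 8(%base)
//
// together with allocation hints asking for %a/%b to land in an even/odd
// GPR pair, so the pseudo can be lowered to a real Zilsd LD later in the
// pipeline.
//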

#include "RISCV.h"
#include "RISCVInstrInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "riscv-zilsd-opt"

STATISTIC(NumLDFormed, "Number of LD instructions formed");
STATISTIC(NumSDFormed, "Number of SD instructions formed");

static cl::opt<bool>
    DisableZilsdOpt("disable-riscv-zilsd-opt", cl::Hidden, cl::init(false),
                    cl::desc("Disable Zilsd load/store optimization"));

static cl::opt<unsigned> MaxRescheduleDistance(
    "riscv-zilsd-max-reschedule-distance", cl::Hidden, cl::init(10),
    cl::desc("Maximum distance for rescheduling load/store instructions"));

namespace {

//===----------------------------------------------------------------------===//
// Pre-allocation Zilsd optimization pass
//===----------------------------------------------------------------------===//
class RISCVPreAllocZilsdOpt : public MachineFunctionPass {
public:
  static char ID;

  RISCVPreAllocZilsdOpt() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "RISC-V pre-allocation Zilsd load/store optimization";
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().setIsSSA();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
  enum class MemoryOffsetKind {
    Imm = 0,
    Global = 1,
    CPI = 2,
    BlockAddr = 3,
    FrameIdx = 4,
    Unknown = 5,
  };
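  // A memory access is keyed by the kind of its offset operand plus the
  // offset value; a base is keyed by its register number (or frame index)
  // plus that same kind, so accesses are only grouped when their offsets are
  // directly comparable.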
  using MemOffset = std::pair<MemoryOffsetKind, int>;
  using BaseRegInfo = std::pair<unsigned, MemoryOffsetKind>;

private:
  bool isMemoryOp(const MachineInstr &MI);
  bool rescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
  bool canFormLdSdPair(MachineInstr *MI0, MachineInstr *MI1);
  bool rescheduleOps(MachineBasicBlock *MBB,
                     SmallVectorImpl<MachineInstr *> &MIs, BaseRegInfo Base,
                     bool IsLoad,
                     DenseMap<MachineInstr *, unsigned> &MI2LocMap);
  bool isSafeToMove(MachineInstr *MI, MachineInstr *Target, bool MoveForward);
  MemOffset getMemoryOpOffset(const MachineInstr &MI);

  const RISCVSubtarget *STI;
  const RISCVInstrInfo *TII;
  const RISCVRegisterInfo *TRI;
  MachineRegisterInfo *MRI;
  AliasAnalysis *AA;
  MachineDominatorTree *DT;
  Align RequiredAlign;
};

} // end anonymous namespace

char RISCVPreAllocZilsdOpt::ID = 0;

INITIALIZE_PASS_BEGIN(RISCVPreAllocZilsdOpt, "riscv-prera-zilsd-opt",
                      "RISC-V pre-allocation Zilsd optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_END(RISCVPreAllocZilsdOpt, "riscv-prera-zilsd-opt",
                    "RISC-V pre-allocation Zilsd optimization", false, false)

//===----------------------------------------------------------------------===//
// Pre-allocation pass implementation
//===----------------------------------------------------------------------===//

bool RISCVPreAllocZilsdOpt::runOnMachineFunction(MachineFunction &MF) {
  if (DisableZilsdOpt || skipFunction(MF.getFunction()))
    return false;

  STI = &MF.getSubtarget<RISCVSubtarget>();

  // Only run on RV32 with Zilsd extension
  if (STI->is64Bit() || !STI->hasStdExtZilsd())
    return false;

  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();

  // Check alignment: default is 8-byte, but allow 4-byte with tune feature.
  // If unaligned scalar memory is enabled, allow any alignment.
  RequiredAlign = STI->getZilsdAlign();
  bool Modified = false;
  for (auto &MBB : MF) {
    Modified |= rescheduleLoadStoreInstrs(&MBB);
  }

  return Modified;
}

RISCVPreAllocZilsdOpt::MemOffset
RISCVPreAllocZilsdOpt::getMemoryOpOffset(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case RISCV::LW:
  case RISCV::SW: {
    // For LW/SW, the base is in operand 1 and the offset is in operand 2.
    const MachineOperand &BaseOp = MI.getOperand(1);
    const MachineOperand &OffsetOp = MI.getOperand(2);

    // Handle immediate offsets.
    if (OffsetOp.isImm()) {
      if (BaseOp.isFI())
        return std::make_pair(MemoryOffsetKind::FrameIdx, OffsetOp.getImm());
      return std::make_pair(MemoryOffsetKind::Imm, OffsetOp.getImm());
    }

    // Handle symbolic operands with MO_LO flag (from MergeBaseOffset).
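    // For instance, after RISCVMergeBaseOffsetOpt folds a global address, an
    // access may look like "lw rd, %lo(sym + off)(rbase)"; the offset operand
    // is then the global (or CPI/blockaddress) carrying MO_LO, and its
    // getOffset() gives the constant part used for pairing. (Illustrative
    // example; the exact MIR syntax varies.)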
    if (OffsetOp.getTargetFlags() & RISCVII::MO_LO) {
      if (OffsetOp.isGlobal())
        return std::make_pair(MemoryOffsetKind::Global, OffsetOp.getOffset());
      if (OffsetOp.isCPI())
        return std::make_pair(MemoryOffsetKind::CPI, OffsetOp.getOffset());
      if (OffsetOp.isBlockAddress())
        return std::make_pair(MemoryOffsetKind::BlockAddr,
                              OffsetOp.getOffset());
    }

    break;
  }
  default:
    break;
  }

  return std::make_pair(MemoryOffsetKind::Unknown, 0);
}

bool RISCVPreAllocZilsdOpt::canFormLdSdPair(MachineInstr *MI0,
                                            MachineInstr *MI1) {
  if (!MI0->hasOneMemOperand() || !MI1->hasOneMemOperand())
    return false;

  // Get the offsets and check that they are consecutive.
  int Offset0 = getMemoryOpOffset(*MI0).second;
  int Offset1 = getMemoryOpOffset(*MI1).second;

  // The offsets must be exactly 4 bytes apart.
  if (Offset1 - Offset0 != 4)
    return false;

  // We need to guarantee that the alignment of (base + offset) is legal for a
  // paired access.
  const MachineMemOperand *MMO = *MI0->memoperands_begin();
  if (MMO->getAlign() < RequiredAlign)
    return false;

  // Check that the two destination/source registers are different for
  // loads/stores respectively.
  Register FirstReg = MI0->getOperand(0).getReg();
  Register SecondReg = MI1->getOperand(0).getReg();
  if (FirstReg == SecondReg)
    return false;

  return true;
}

bool RISCVPreAllocZilsdOpt::isSafeToMove(MachineInstr *MI, MachineInstr *Target,
                                         bool MoveForward) {
  MachineBasicBlock *MBB = MI->getParent();
  MachineBasicBlock::iterator Start = MI->getIterator();
  MachineBasicBlock::iterator End = Target->getIterator();

  if (!MoveForward)
    std::swap(Start, End);

  // Increment Start to skip the current instruction.
  if (Start != MBB->end())
    ++Start;

  Register DefReg = MI->getOperand(0).getReg();
  const MachineOperand &BaseOp = MI->getOperand(1);

  unsigned ScanCount = 0;
  for (auto It = Start; It != End; ++It, ++ScanCount) {
    // Don't move across calls or terminators.
    if (It->isCall() || It->isTerminator()) {
      LLVM_DEBUG(dbgs() << "Cannot move across call/terminator: " << *It);
      return false;
    }

    // Don't move across instructions with unmodeled side effects (such as
    // memory barriers).
    if (It->hasUnmodeledSideEffects()) {
      LLVM_DEBUG(dbgs() << "Cannot move across instruction with side effects: "
                        << *It);
      return false;
    }

    // Check if the base register is modified.
    if (BaseOp.isReg() && It->modifiesRegister(BaseOp.getReg(), TRI)) {
      LLVM_DEBUG(dbgs() << "Base register " << BaseOp.getReg()
                        << " modified by: " << *It);
      return false;
    }

    // For loads, check if the loaded value is used or clobbered.
    if (MI->mayLoad() &&
        (It->readsRegister(DefReg, TRI) || It->modifiesRegister(DefReg, TRI))) {
      LLVM_DEBUG(dbgs() << "Destination register " << DefReg
                        << " used by: " << *It);
      return false;
    }

    // For stores, check if the stored register is modified.
    if (MI->mayStore() && It->modifiesRegister(DefReg, TRI)) {
      LLVM_DEBUG(dbgs() << "Source register " << DefReg
                        << " modified by: " << *It);
      return false;
    }

    // Check for memory operation interference.
    if (It->mayLoadOrStore() && It->mayAlias(AA, *MI, /*UseTBAA*/ false)) {
      LLVM_DEBUG(dbgs() << "Memory operation interference detected\n");
      return false;
    }
  }

  return true;
}

bool RISCVPreAllocZilsdOpt::rescheduleOps(
    MachineBasicBlock *MBB, SmallVectorImpl<MachineInstr *> &MIs,
    BaseRegInfo Base, bool IsLoad,
    DenseMap<MachineInstr *, unsigned> &MI2LocMap) {
  // Sort by offset. At this point the base register and MemoryOffsetKind are
  // guaranteed to be the same, so we only need to sort by the offset value.
  llvm::sort(MIs.begin(), MIs.end(), [this](MachineInstr *A, MachineInstr *B) {
    return getMemoryOpOffset(*A).second < getMemoryOpOffset(*B).second;
  });

  bool Modified = false;

  // Try to pair consecutive operations
  for (size_t i = 0; i + 1 < MIs.size(); i++) {
    MachineInstr *MI0 = MIs[i];
    MachineInstr *MI1 = MIs[i + 1];

    Register FirstReg = MI0->getOperand(0).getReg();
    Register SecondReg = MI1->getOperand(0).getReg();
    const MachineOperand &BaseOp = MI0->getOperand(1);
    const MachineOperand &OffsetOp = MI0->getOperand(2);
    assert((BaseOp.isReg() || BaseOp.isFI()) &&
           "Base register should be register or frame index");

    // At this point, MI0 and MI1 are:
    // 1. both either LW or SW,
    // 2. guaranteed to have the same memory kind,
    // 3. guaranteed to have the same base register,
    // 4. already sorted by offset value,
    // so we don't have to check these in canFormLdSdPair.
    if (!canFormLdSdPair(MI0, MI1))
      continue;

    // Use MI2LocMap to determine which instruction appears later in program
    // order
    bool MI1IsLater = MI2LocMap[MI1] > MI2LocMap[MI0];

    // For loads: move later instruction up (backwards) to earlier instruction
    // For stores: move earlier instruction down (forwards) to later instruction
    MachineInstr *MoveInstr, *TargetInstr;
    if (IsLoad) {
      // For loads: move the later instruction to the earlier one
      MoveInstr = MI1IsLater ? MI1 : MI0;
      TargetInstr = MI1IsLater ? MI0 : MI1;
    } else {
      // For stores: move the earlier instruction to the later one
      MoveInstr = MI1IsLater ? MI0 : MI1;
      TargetInstr = MI1IsLater ? MI1 : MI0;
    }

    unsigned Distance = MI1IsLater ? MI2LocMap[MI1] - MI2LocMap[MI0]
                                   : MI2LocMap[MI0] - MI2LocMap[MI1];
    if (!isSafeToMove(MoveInstr, TargetInstr, !IsLoad) ||
        Distance > MaxRescheduleDistance)
      continue;

    // Move the instruction to the target position
    MachineBasicBlock::iterator InsertPos = TargetInstr->getIterator();
    ++InsertPos;

    // If we need to move an instruction, do it now
    if (MoveInstr != TargetInstr)
      MBB->splice(InsertPos, MBB, MoveInstr->getIterator());

    // Create the paired instruction
    MachineInstrBuilder MIB;
    DebugLoc DL = MI0->getDebugLoc();

    if (IsLoad) {
      MIB = BuildMI(*MBB, InsertPos, DL, TII->get(RISCV::PseudoLD_RV32_OPT))
                .addReg(FirstReg, RegState::Define)
                .addReg(SecondReg, RegState::Define);
      ++NumLDFormed;
      LLVM_DEBUG(dbgs() << "Formed LD: " << *MIB << "\n");
    } else {
      MIB = BuildMI(*MBB, InsertPos, DL, TII->get(RISCV::PseudoSD_RV32_OPT))
                .addReg(FirstReg)
                .addReg(SecondReg);
      ++NumSDFormed;
      LLVM_DEBUG(dbgs() << "Formed SD: " << *MIB << "\n");
    }

    if (BaseOp.isReg())
      MIB = MIB.addReg(BaseOp.getReg());
    else
      MIB = MIB.addFrameIndex(BaseOp.getIndex());
    MIB = MIB.add(OffsetOp);

    // Copy memory operands
    MIB.cloneMergedMemRefs({MI0, MI1});

    // Add register allocation hints for consecutive registers
    // RISC-V Zilsd requires even/odd register pairs
    // Only set hints for virtual registers (physical registers already have
    // encoding)
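    // For example, if the allocator honours the hints below and assigns
    // FirstReg/SecondReg to an even/odd pair such as a2/a3, the pseudo can
    // later be rewritten as a single "ld a2, off(base)" / "sd a2, off(base)".
    // (Register names here are purely illustrative.)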
    if (FirstReg.isVirtual() && SecondReg.isVirtual()) {
      // For virtual registers, we can't determine even/odd yet, but we can
      // hint that they should be allocated as a consecutive pair
      MRI->setRegAllocationHint(FirstReg, RISCVRI::RegPairEven, SecondReg);
      MRI->setRegAllocationHint(SecondReg, RISCVRI::RegPairOdd, FirstReg);
    }

    // Remove the original instructions
    MI0->eraseFromParent();
    MI1->eraseFromParent();

    Modified = true;

    // Skip the next instruction since we've already processed it
    i++;
  }

  return Modified;
}

bool RISCVPreAllocZilsdOpt::isMemoryOp(const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  if (Opcode != RISCV::LW && Opcode != RISCV::SW)
    return false;

  if (!MI.getOperand(1).isReg() && !MI.getOperand(1).isFI())
    return false;

  // When no memory operands are present, conservatively assume unaligned,
  // volatile, unfoldable.
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();

  if (MMO->isVolatile() || MMO->isAtomic())
    return false;

  // sw <undef> could probably be eliminated entirely, but for now we just want
  // to avoid making a mess of it.
  if (MI.getOperand(0).isReg() && MI.getOperand(0).isUndef())
    return false;

  // Likewise don't mess with references to undefined addresses.
  if (MI.getOperand(1).isReg() && MI.getOperand(1).isUndef())
    return false;

  return true;
}

bool RISCVPreAllocZilsdOpt::rescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
  bool Modified = false;

  // Process the basic block in windows delimited by calls, terminators,
  // or instructions with duplicate base+offset pairs.
  MachineBasicBlock::iterator MBBI = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();

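  // As an illustration, a block such as
  //   lw, lw, call foo, sw, sw
  // is processed as two windows: the loads before the call and the stores
  // after it, since nothing is rescheduled across the call.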
  while (MBBI != E) {
    // Map from instruction to its location in the current window
    DenseMap<MachineInstr *, unsigned> MI2LocMap;

    // Map from base register to the list of load/store instructions using it
    using Base2InstMap = DenseMap<BaseRegInfo, SmallVector<MachineInstr *, 4>>;
    using BaseVec = SmallVector<BaseRegInfo, 4>;
    Base2InstMap Base2LdsMap;
    Base2InstMap Base2StsMap;
    BaseVec LdBases;
    BaseVec StBases;

    unsigned Loc = 0;

    // Build the current window of instructions
    for (; MBBI != E; ++MBBI) {
      MachineInstr &MI = *MBBI;

      // Stop at barriers (calls and terminators)
      if (MI.isCall() || MI.isTerminator()) {
        // Move past the barrier for the next iteration
        ++MBBI;
        break;
      }

      // Track the instruction's location in the window
      if (!MI.isDebugInstr())
        MI2LocMap[&MI] = ++Loc;

      MemOffset Offset = getMemoryOpOffset(MI);
      // Skip non-memory operations and operations without a valid memory
      // offset kind.
      if (!isMemoryOp(MI) || Offset.first == MemoryOffsetKind::Unknown)
        continue;

      bool IsLd = (MI.getOpcode() == RISCV::LW);
      const MachineOperand &BaseOp = MI.getOperand(1);
      unsigned Base;
      if (BaseOp.isReg())
        Base = BaseOp.getReg().id();
      else
        Base = BaseOp.getIndex();
      bool StopHere = false;

      // Lambda to find or add base register entries
      auto FindBases = [&](Base2InstMap &Base2Ops, BaseVec &Bases) {
        auto [BI, Inserted] = Base2Ops.try_emplace({Base, Offset.first});
        if (Inserted) {
          // First time seeing this base register
          BI->second.push_back(&MI);
          Bases.push_back({Base, Offset.first});
          return;
        }
        // Check if we've seen this exact base+offset before
        if (any_of(BI->second, [&](const MachineInstr *PrevMI) {
              return Offset == getMemoryOpOffset(*PrevMI);
            })) {
          // Found a duplicate base+offset - stop here to process the current
          // window
          StopHere = true;
        } else {
          BI->second.push_back(&MI);
        }
      };

      if (IsLd)
        FindBases(Base2LdsMap, LdBases);
      else
        FindBases(Base2StsMap, StBases);

      if (StopHere) {
        // Found a duplicate (a base+offset combination that was seen earlier).
        // Backtrack to process the current window.
        --Loc;
        break;
      }
    }

    // Process the current window - reschedule loads
    for (auto Base : LdBases) {
      SmallVectorImpl<MachineInstr *> &Lds = Base2LdsMap[Base];
      if (Lds.size() > 1) {
        Modified |= rescheduleOps(MBB, Lds, Base, true, MI2LocMap);
      }
    }

    // Process the current window - reschedule stores
    for (auto Base : StBases) {
      SmallVectorImpl<MachineInstr *> &Sts = Base2StsMap[Base];
      if (Sts.size() > 1) {
        Modified |= rescheduleOps(MBB, Sts, Base, false, MI2LocMap);
      }
    }
  }

  return Modified;
}

//===----------------------------------------------------------------------===//
// Pass creation functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createRISCVPreAllocZilsdOptPass() {
  return new RISCVPreAllocZilsdOpt();
}