LLVM 22.0.0git
RISCVMergeBaseOffset.cpp
Go to the documentation of this file.
//===----- RISCVMergeBaseOffset.cpp - Optimise address calculations ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Merge the offset of address calculation into the offset field
// of instructions in a global address lowering sequence.
//
//===----------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <optional>
22using namespace llvm;
23
24#define DEBUG_TYPE "riscv-merge-base-offset"
25#define RISCV_MERGE_BASE_OFFSET_NAME "RISC-V Merge Base Offset"
26namespace {
27
28class RISCVMergeBaseOffsetOpt : public MachineFunctionPass {
29 const RISCVSubtarget *ST = nullptr;
31
32public:
33 static char ID;
34 bool runOnMachineFunction(MachineFunction &Fn) override;
35 bool detectFoldable(MachineInstr &Hi, MachineInstr *&Lo);
36
37 bool detectAndFoldOffset(MachineInstr &Hi, MachineInstr &Lo);
38 bool foldOffset(MachineInstr &Hi, MachineInstr &Lo, MachineInstr &Tail,
39 int64_t Offset);
40 bool foldLargeOffset(MachineInstr &Hi, MachineInstr &Lo,
41 MachineInstr &TailAdd, Register GSReg);
42 bool foldShiftedOffset(MachineInstr &Hi, MachineInstr &Lo,
43 MachineInstr &TailShXAdd, Register GSReg);
44
45 bool foldIntoMemoryOps(MachineInstr &Hi, MachineInstr &Lo);
46
47 RISCVMergeBaseOffsetOpt() : MachineFunctionPass(ID) {}
48
49 MachineFunctionProperties getRequiredProperties() const override {
50 return MachineFunctionProperties().setIsSSA();
51 }
52
53 void getAnalysisUsage(AnalysisUsage &AU) const override {
54 AU.setPreservesCFG();
56 }
57
58 StringRef getPassName() const override {
60 }
61};
62} // end anonymous namespace
63
64char RISCVMergeBaseOffsetOpt::ID = 0;
65INITIALIZE_PASS(RISCVMergeBaseOffsetOpt, DEBUG_TYPE,
66 RISCV_MERGE_BASE_OFFSET_NAME, false, false)
67
68// Detect either of the patterns:
69//
70// 1. (medlow pattern):
71// lui vreg1, %hi(s)
72// addi vreg2, vreg1, %lo(s)
73//
74// 2. (medany pattern):
75// .Lpcrel_hi1:
76// auipc vreg1, %pcrel_hi(s)
77// addi vreg2, vreg1, %pcrel_lo(.Lpcrel_hi1)
78//
79// The pattern is only accepted if:
80// 1) The first instruction has only one use, which is the ADDI.
81// 2) The address operands have the appropriate type, reflecting the
82// lowering of a global address or constant pool using medlow or medany.
83// 3) The offset value in the Global Address or Constant Pool is 0.
84bool RISCVMergeBaseOffsetOpt::detectFoldable(MachineInstr &Hi,
85 MachineInstr *&Lo) {
86 auto HiOpc = Hi.getOpcode();
87 if (HiOpc != RISCV::LUI && HiOpc != RISCV::AUIPC &&
88 HiOpc != RISCV::PseudoMovAddr)
89 return false;
90
91 const MachineOperand &HiOp1 = Hi.getOperand(1);
92 unsigned ExpectedFlags =
93 HiOpc == RISCV::AUIPC ? RISCVII::MO_PCREL_HI : RISCVII::MO_HI;
94 if (HiOp1.getTargetFlags() != ExpectedFlags)
95 return false;
96
97 if (!(HiOp1.isGlobal() || HiOp1.isCPI() || HiOp1.isBlockAddress()) ||
98 HiOp1.getOffset() != 0)
99 return false;
100
101 if (HiOpc == RISCV::PseudoMovAddr) {
102 // Most of the code should handle it correctly without modification by
103 // setting Lo and Hi both point to PseudoMovAddr
104 Lo = &Hi;
105 } else {
106 Register HiDestReg = Hi.getOperand(0).getReg();
107 if (!MRI->hasOneUse(HiDestReg))
108 return false;
109
110 Lo = &*MRI->use_instr_begin(HiDestReg);
111 if (Lo->getOpcode() != RISCV::ADDI)
112 return false;
113 }
114
115 const MachineOperand &LoOp2 = Lo->getOperand(2);
116 if (HiOpc == RISCV::LUI || HiOpc == RISCV::PseudoMovAddr) {
117 if (LoOp2.getTargetFlags() != RISCVII::MO_LO ||
118 !(LoOp2.isGlobal() || LoOp2.isCPI() || LoOp2.isBlockAddress()) ||
119 LoOp2.getOffset() != 0)
120 return false;
121 } else {
122 assert(HiOpc == RISCV::AUIPC);
123 if (LoOp2.getTargetFlags() != RISCVII::MO_PCREL_LO ||
125 return false;
126 }
127
128 if (HiOp1.isGlobal()) {
129 LLVM_DEBUG(dbgs() << " Found lowered global address: "
130 << *HiOp1.getGlobal() << "\n");
131 } else if (HiOp1.isBlockAddress()) {
132 LLVM_DEBUG(dbgs() << " Found lowered basic address: "
133 << *HiOp1.getBlockAddress() << "\n");
134 } else if (HiOp1.isCPI()) {
135 LLVM_DEBUG(dbgs() << " Found lowered constant pool: " << HiOp1.getIndex()
136 << "\n");
137 }
138
139 return true;
140}
141
142// Update the offset in Hi and Lo instructions.
143// Delete the tail instruction and update all the uses to use the
144// output from Lo.
145bool RISCVMergeBaseOffsetOpt::foldOffset(MachineInstr &Hi, MachineInstr &Lo,
146 MachineInstr &Tail, int64_t Offset) {
147 assert(isInt<32>(Offset) && "Unexpected offset");
148
149 // If Hi is an AUIPC, don't fold the offset if it is outside the bounds of
150 // the global object. The object may be within 2GB of the PC, but addresses
151 // outside of the object might not be.
152 auto HiOpc = Hi.getOpcode();
153 if (HiOpc == RISCV::AUIPC && Hi.getOperand(1).isGlobal()) {
154 const GlobalValue *GV = Hi.getOperand(1).getGlobal();
155 Type *Ty = GV->getValueType();
156 if (!Ty->isSized() || Offset < 0 ||
157 (uint64_t)Offset > GV->getDataLayout().getTypeAllocSize(Ty))
158 return false;
159 }
160
161 // Put the offset back in Hi and the Lo
162 Hi.getOperand(1).setOffset(Offset);
163 if (HiOpc != RISCV::AUIPC)
164 Lo.getOperand(2).setOffset(Offset);
165 // Delete the tail instruction.
166 Register LoOp0Reg = Lo.getOperand(0).getReg();
167 Register TailOp0Reg = Tail.getOperand(0).getReg();
168 MRI->constrainRegClass(LoOp0Reg, MRI->getRegClass(TailOp0Reg));
169 MRI->replaceRegWith(TailOp0Reg, LoOp0Reg);
170 Tail.eraseFromParent();
171 LLVM_DEBUG(dbgs() << " Merged offset " << Offset << " into base.\n"
172 << " " << Hi << " " << Lo;);
173 return true;
174}
175
176// Detect patterns for large offsets that are passed into an ADD instruction.
177// If the pattern is found, updates the offset in Hi and Lo instructions
178// and deletes TailAdd and the instructions that produced the offset.
179//
180// Base address lowering is of the form:
181// Hi: lui vreg1, %hi(s)
182// Lo: addi vreg2, vreg1, %lo(s)
183// / \
184// / \
185// / \
186// / The large offset can be of two forms: \
187// 1) Offset that has non zero bits in lower 2) Offset that has non zero
188// 12 bits and upper 20 bits bits in upper 20 bits only
189// OffseLUI: lui vreg3, 4
190// OffsetTail: addi voff, vreg3, 188 OffsetTail: lui voff, 128
191// \ /
192// \ /
193// \ /
194// \ /
195// TailAdd: add vreg4, vreg2, voff
196bool RISCVMergeBaseOffsetOpt::foldLargeOffset(MachineInstr &Hi,
197 MachineInstr &Lo,
198 MachineInstr &TailAdd,
199 Register GAReg) {
200 assert((TailAdd.getOpcode() == RISCV::ADD) && "Expected ADD instruction!");
201 Register Rs = TailAdd.getOperand(1).getReg();
202 Register Rt = TailAdd.getOperand(2).getReg();
203 Register Reg = Rs == GAReg ? Rt : Rs;
204
205 // Can't fold if the register has more than one use.
206 if (!Reg.isVirtual() || !MRI->hasOneUse(Reg))
207 return false;
208 // This can point to an ADDI(W) or a LUI:
209 MachineInstr &OffsetTail = *MRI->getVRegDef(Reg);
210 auto OffsetTailOpc = OffsetTail.getOpcode();
211 if (OffsetTailOpc == RISCV::ADDI || OffsetTailOpc == RISCV::ADDIW) {
212 // The offset value has non zero bits in both %hi and %lo parts.
213 // Detect an ADDI that feeds from a LUI instruction.
214 MachineOperand &AddiImmOp = OffsetTail.getOperand(2);
215 if (AddiImmOp.getTargetFlags() != RISCVII::MO_None)
216 return false;
217 Register AddiReg = OffsetTail.getOperand(1).getReg();
218 int64_t OffLo = AddiImmOp.getImm();
219
220 // Handle rs1 of ADDI is X0.
221 if (AddiReg == RISCV::X0) {
222 LLVM_DEBUG(dbgs() << " Offset Instrs: " << OffsetTail);
223 if (!foldOffset(Hi, Lo, TailAdd, OffLo))
224 return false;
225 OffsetTail.eraseFromParent();
226 return true;
227 }
228
229 MachineInstr &OffsetLui = *MRI->getVRegDef(AddiReg);
230 MachineOperand &LuiImmOp = OffsetLui.getOperand(1);
231 if (OffsetLui.getOpcode() != RISCV::LUI ||
232 LuiImmOp.getTargetFlags() != RISCVII::MO_None ||
233 !MRI->hasOneUse(OffsetLui.getOperand(0).getReg()))
234 return false;
235 int64_t Offset = SignExtend64<32>(LuiImmOp.getImm() << 12);
236 Offset += OffLo;
237 // RV32 ignores the upper 32 bits. ADDIW sign extends the result.
238 if (!ST->is64Bit() || OffsetTailOpc == RISCV::ADDIW)
240 // We can only fold simm32 offsets.
241 if (!isInt<32>(Offset))
242 return false;
243 LLVM_DEBUG(dbgs() << " Offset Instrs: " << OffsetTail
244 << " " << OffsetLui);
245 if (!foldOffset(Hi, Lo, TailAdd, Offset))
246 return false;
247 OffsetTail.eraseFromParent();
248 OffsetLui.eraseFromParent();
249 return true;
250 } else if (OffsetTailOpc == RISCV::LUI) {
251 // The offset value has all zero bits in the lower 12 bits. Only LUI
252 // exists.
253 LLVM_DEBUG(dbgs() << " Offset Instr: " << OffsetTail);
254 int64_t Offset = SignExtend64<32>(OffsetTail.getOperand(1).getImm() << 12);
255 if (!foldOffset(Hi, Lo, TailAdd, Offset))
256 return false;
257 OffsetTail.eraseFromParent();
258 return true;
259 }
260 return false;
261}
262
263// Detect patterns for offsets that are passed into a SHXADD instruction.
264// The offset has 1, 2, or 3 trailing zeros and fits in simm13, simm14, simm15.
265// The constant is created with addi voff, x0, C, and shXadd is used to
266// fill insert the trailing zeros and do the addition.
267// If the pattern is found, updates the offset in Hi and Lo instructions
268// and deletes TailShXAdd and the instructions that produced the offset.
269//
270// Hi: lui vreg1, %hi(s)
271// Lo: addi vreg2, vreg1, %lo(s)
272// OffsetTail: addi voff, x0, C
273// TailAdd: shXadd vreg4, voff, vreg2
274bool RISCVMergeBaseOffsetOpt::foldShiftedOffset(MachineInstr &Hi,
275 MachineInstr &Lo,
276 MachineInstr &TailShXAdd,
277 Register GAReg) {
278 assert((TailShXAdd.getOpcode() == RISCV::SH1ADD ||
279 TailShXAdd.getOpcode() == RISCV::SH2ADD ||
280 TailShXAdd.getOpcode() == RISCV::SH3ADD) &&
281 "Expected SHXADD instruction!");
282
283 if (GAReg != TailShXAdd.getOperand(2).getReg())
284 return false;
285
286 // The first source is the shifted operand.
287 Register Rs1 = TailShXAdd.getOperand(1).getReg();
288
289 // Can't fold if the register has more than one use.
290 if (!Rs1.isVirtual() || !MRI->hasOneUse(Rs1))
291 return false;
292 // This can point to an ADDI X0, C.
293 MachineInstr &OffsetTail = *MRI->getVRegDef(Rs1);
294 if (OffsetTail.getOpcode() != RISCV::ADDI)
295 return false;
296 if (!OffsetTail.getOperand(1).isReg() ||
297 OffsetTail.getOperand(1).getReg() != RISCV::X0 ||
298 !OffsetTail.getOperand(2).isImm())
299 return false;
300
301 int64_t Offset = OffsetTail.getOperand(2).getImm();
302 assert(isInt<12>(Offset) && "Unexpected offset");
303
304 unsigned ShAmt;
305 switch (TailShXAdd.getOpcode()) {
306 default: llvm_unreachable("Unexpected opcode");
307 case RISCV::SH1ADD: ShAmt = 1; break;
308 case RISCV::SH2ADD: ShAmt = 2; break;
309 case RISCV::SH3ADD: ShAmt = 3; break;
310 }
311
312 Offset = (uint64_t)Offset << ShAmt;
313
314 LLVM_DEBUG(dbgs() << " Offset Instr: " << OffsetTail);
315 if (!foldOffset(Hi, Lo, TailShXAdd, Offset))
316 return false;
317 OffsetTail.eraseFromParent();
318 return true;
319}
320
321bool RISCVMergeBaseOffsetOpt::detectAndFoldOffset(MachineInstr &Hi,
322 MachineInstr &Lo) {
323 Register DestReg = Lo.getOperand(0).getReg();
324
325 // Look for arithmetic instructions we can get an offset from.
326 // We might be able to remove the arithmetic instructions by folding the
327 // offset into the LUI+ADDI.
328 if (!MRI->hasOneUse(DestReg))
329 return false;
330
331 // Lo has only one use.
332 MachineInstr &Tail = *MRI->use_instr_begin(DestReg);
333 switch (Tail.getOpcode()) {
334 default:
335 LLVM_DEBUG(dbgs() << "Don't know how to get offset from this instr:"
336 << Tail);
337 break;
338 case RISCV::ADDI: {
339 // Offset is simply an immediate operand.
340 int64_t Offset = Tail.getOperand(2).getImm();
341
342 // We might have two ADDIs in a row.
343 Register TailDestReg = Tail.getOperand(0).getReg();
344 if (MRI->hasOneUse(TailDestReg)) {
345 MachineInstr &TailTail = *MRI->use_instr_begin(TailDestReg);
346 if (TailTail.getOpcode() == RISCV::ADDI) {
347 Offset += TailTail.getOperand(2).getImm();
348 LLVM_DEBUG(dbgs() << " Offset Instrs: " << Tail << TailTail);
349 if (!foldOffset(Hi, Lo, TailTail, Offset))
350 return false;
351 Tail.eraseFromParent();
352 return true;
353 }
354 }
355
356 LLVM_DEBUG(dbgs() << " Offset Instr: " << Tail);
357 return foldOffset(Hi, Lo, Tail, Offset);
358 }
359 case RISCV::ADD:
360 // The offset is too large to fit in the immediate field of ADDI.
361 // This can be in two forms:
362 // 1) LUI hi_Offset followed by:
363 // ADDI lo_offset
364 // This happens in case the offset has non zero bits in
365 // both hi 20 and lo 12 bits.
366 // 2) LUI (offset20)
367 // This happens in case the lower 12 bits of the offset are zeros.
368 return foldLargeOffset(Hi, Lo, Tail, DestReg);
369 case RISCV::SH1ADD:
370 case RISCV::SH2ADD:
371 case RISCV::SH3ADD:
372 // The offset is too large to fit in the immediate field of ADDI.
373 // It may be encoded as (SH2ADD (ADDI X0, C), DestReg) or
374 // (SH3ADD (ADDI X0, C), DestReg).
375 return foldShiftedOffset(Hi, Lo, Tail, DestReg);
376 }
377
378 return false;
379}
380
381bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
382 MachineInstr &Lo) {
383 Register DestReg = Lo.getOperand(0).getReg();
384
385 // If all the uses are memory ops with the same offset, we can transform:
386 //
387 // 1. (medlow pattern):
388 // Hi: lui vreg1, %hi(foo) ---> lui vreg1, %hi(foo+8)
389 // Lo: addi vreg2, vreg1, %lo(foo) ---> lw vreg3, lo(foo+8)(vreg1)
390 // Tail: lw vreg3, 8(vreg2)
391 //
392 // 2. (medany pattern):
393 // Hi: 1:auipc vreg1, %pcrel_hi(s) ---> auipc vreg1, %pcrel_hi(foo+8)
394 // Lo: addi vreg2, vreg1, %pcrel_lo(1b) ---> lw vreg3, %pcrel_lo(1b)(vreg1)
395 // Tail: lw vreg3, 8(vreg2)
396
397 std::optional<int64_t> CommonOffset;
398 DenseMap<const MachineInstr *, SmallVector<unsigned>>
399 InlineAsmMemoryOpIndexesMap;
400 for (const MachineInstr &UseMI : MRI->use_instructions(DestReg)) {
401 switch (UseMI.getOpcode()) {
402 default:
403 LLVM_DEBUG(dbgs() << "Not a load or store instruction: " << UseMI);
404 return false;
405 case RISCV::LB:
406 case RISCV::LH:
407 case RISCV::LH_INX:
408 case RISCV::LW:
409 case RISCV::LW_INX:
410 case RISCV::LBU:
411 case RISCV::LHU:
412 case RISCV::LWU:
413 case RISCV::LD:
414 case RISCV::LD_RV32:
415 case RISCV::FLH:
416 case RISCV::FLW:
417 case RISCV::FLD:
418 case RISCV::SB:
419 case RISCV::SH:
420 case RISCV::SH_INX:
421 case RISCV::SW:
422 case RISCV::SW_INX:
423 case RISCV::SD:
424 case RISCV::SD_RV32:
425 case RISCV::FSH:
426 case RISCV::FSW:
427 case RISCV::FSD: {
428 if (UseMI.getOperand(1).isFI())
429 return false;
430 // Register defined by Lo should not be the value register.
431 if (DestReg == UseMI.getOperand(0).getReg())
432 return false;
433 assert(DestReg == UseMI.getOperand(1).getReg() &&
434 "Expected base address use");
435 // All load/store instructions must use the same offset.
436 int64_t Offset = UseMI.getOperand(2).getImm();
437 if (CommonOffset && Offset != CommonOffset)
438 return false;
439 CommonOffset = Offset;
440 break;
441 }
442 case RISCV::INLINEASM:
443 case RISCV::INLINEASM_BR: {
444 SmallVector<unsigned> InlineAsmMemoryOpIndexes;
445 unsigned NumOps = 0;
446 for (unsigned I = InlineAsm::MIOp_FirstOperand;
447 I < UseMI.getNumOperands(); I += 1 + NumOps) {
448 const MachineOperand &FlagsMO = UseMI.getOperand(I);
449 // Should be an imm.
450 if (!FlagsMO.isImm())
451 continue;
452
453 const InlineAsm::Flag Flags(FlagsMO.getImm());
454 NumOps = Flags.getNumOperandRegisters();
455
456 // Memory constraints have two operands.
457 if (NumOps != 2 || !Flags.isMemKind()) {
458 // If the register is used by something other than a memory
459 // constraint, we should not fold.
460 for (unsigned J = 0; J < NumOps; ++J) {
461 const MachineOperand &MO = UseMI.getOperand(I + 1 + J);
462 if (MO.isReg() && MO.getReg() == DestReg)
463 return false;
464 }
465 continue;
466 }
467
468 // We can't do this for constraint A because AMO instructions don't have
469 // an immediate offset field.
470 if (Flags.getMemoryConstraintID() == InlineAsm::ConstraintCode::A)
471 return false;
472
473 const MachineOperand &AddrMO = UseMI.getOperand(I + 1);
474 if (!AddrMO.isReg() || AddrMO.getReg() != DestReg)
475 continue;
476
477 const MachineOperand &OffsetMO = UseMI.getOperand(I + 2);
478 if (!OffsetMO.isImm())
479 continue;
480
481 // All inline asm memory operands must use the same offset.
482 int64_t Offset = OffsetMO.getImm();
483 if (CommonOffset && Offset != CommonOffset)
484 return false;
485 CommonOffset = Offset;
486 InlineAsmMemoryOpIndexes.push_back(I + 1);
487 }
488 InlineAsmMemoryOpIndexesMap.insert(
489 std::make_pair(&UseMI, InlineAsmMemoryOpIndexes));
490 break;
491 }
492 }
493 }
494
495 // We found a common offset.
496 // Update the offsets in global address lowering.
497 // We may have already folded some arithmetic so we need to add to any
498 // existing offset.
499 int64_t NewOffset = Hi.getOperand(1).getOffset() + *CommonOffset;
500 // RV32 ignores the upper 32 bits.
501 if (!ST->is64Bit())
502 NewOffset = SignExtend64<32>(NewOffset);
503 // We can only fold simm32 offsets.
504 if (!isInt<32>(NewOffset))
505 return false;
506
507 Hi.getOperand(1).setOffset(NewOffset);
508 MachineOperand &ImmOp = Lo.getOperand(2);
509 auto HiOpc = Hi.getOpcode();
510 // Expand PseudoMovAddr into LUI
511 if (HiOpc == RISCV::PseudoMovAddr) {
512 auto *TII = ST->getInstrInfo();
513 Hi.setDesc(TII->get(RISCV::LUI));
514 Hi.removeOperand(2);
515 }
516
517 if (HiOpc != RISCV::AUIPC)
518 ImmOp.setOffset(NewOffset);
519
520 // Update the immediate in the load/store instructions to add the offset.
521 for (MachineInstr &UseMI :
522 llvm::make_early_inc_range(MRI->use_instructions(DestReg))) {
523 if (UseMI.getOpcode() == RISCV::INLINEASM ||
524 UseMI.getOpcode() == RISCV::INLINEASM_BR) {
525 auto &InlineAsmMemoryOpIndexes = InlineAsmMemoryOpIndexesMap[&UseMI];
526 for (unsigned I : InlineAsmMemoryOpIndexes) {
527 MachineOperand &MO = UseMI.getOperand(I + 1);
528 switch (ImmOp.getType()) {
530 MO.ChangeToGA(ImmOp.getGlobal(), ImmOp.getOffset(),
531 ImmOp.getTargetFlags());
532 break;
534 MO.ChangeToMCSymbol(ImmOp.getMCSymbol(), ImmOp.getTargetFlags());
535 MO.setOffset(ImmOp.getOffset());
536 break;
538 MO.ChangeToBA(ImmOp.getBlockAddress(), ImmOp.getOffset(),
539 ImmOp.getTargetFlags());
540 break;
541 default:
542 report_fatal_error("unsupported machine operand type");
543 break;
544 }
545 }
546 } else {
547 UseMI.removeOperand(2);
548 UseMI.addOperand(ImmOp);
549 }
550 }
551
552 // Prevent Lo (originally PseudoMovAddr, which is also pointed by Hi) from
553 // being erased
554 if (&Lo == &Hi)
555 return true;
556
557 MRI->replaceRegWith(Lo.getOperand(0).getReg(), Hi.getOperand(0).getReg());
558 Lo.eraseFromParent();
559 return true;
560}
561
562bool RISCVMergeBaseOffsetOpt::runOnMachineFunction(MachineFunction &Fn) {
563 if (skipFunction(Fn.getFunction()))
564 return false;
565
566 ST = &Fn.getSubtarget<RISCVSubtarget>();
567
568 bool MadeChange = false;
569 MRI = &Fn.getRegInfo();
570 for (MachineBasicBlock &MBB : Fn) {
571 LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
572 for (MachineInstr &Hi : MBB) {
573 MachineInstr *Lo = nullptr;
574 if (!detectFoldable(Hi, Lo))
575 continue;
576 MadeChange |= detectAndFoldOffset(Hi, *Lo);
577 MadeChange |= foldIntoMemoryOps(Hi, *Lo);
578 }
579 }
580
581 return MadeChange;
582}
583
584/// Returns an instance of the Merge Base Offset Optimization pass.
586 return new RISCVMergeBaseOffsetOpt();
587}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
#define DEBUG_TYPE
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Promote Memory to Register
Definition Mem2Reg.cpp:110
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
#define RISCV_MERGE_BASE_OFFSET_NAME
#define LLVM_DEBUG(...)
Definition Debug.h:114
Represent the analysis usage information of a pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
Definition Globals.cpp:132
Type * getValueType() const
LLVM_ABI StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
Properties which a MachineFunction may have at a given point in time.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Register getReg(unsigned Idx) const
Get the register for the operand index.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
LLVM_ABI void ChangeToMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
ChangeToMCSymbol - Replace this operand with a new MC symbol operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void ChangeToGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
ChangeToGA - Replace this operand with a new global address operand.
LLVM_ABI void ChangeToBA(const BlockAddress *BA, int64_t Offset, unsigned TargetFlags=0)
ChangeToBA - Replace this operand with a new block address operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
unsigned getTargetFlags() const
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isBlockAddress() const
isBlockAddress - Tests if this is a MO_BlockAddress operand.
Register getReg() const
getReg - Returns the register number.
MCSymbol * getMCSymbol() const
@ MO_MCSymbol
MCSymbol reference (for debug/eh info)
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
int64_t getOffset() const
Return the offset from the symbol in this operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const RISCVInstrInfo * getInstrInfo() const override
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
void push_back(const T &Elt)
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
FunctionPass * createRISCVMergeBaseOffsetOptPass()
Returns an instance of the Merge Base Offset Optimization pass.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition MathExtras.h:572