//====- X86FlagsCopyLowering.cpp - Lowers COPY nodes of EFLAGS ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Lowers COPY nodes of EFLAGS by directly extracting and preserving
/// individual flag bits.
///
/// We have to do this by carefully analyzing and rewriting the usage of the
/// copied EFLAGS register because there is no general way to rematerialize the
/// entire EFLAGS register safely and efficiently. Using `popf` both forces
/// dynamic stack adjustment and can create correctness issues due to IF, TF,
/// and other non-status flags being overwritten. Sequences involving SAHF
/// don't work on all x86 processors and are often quite slow compared to
/// directly testing a single status flag preserved in its own GPR.
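///
/// Instead, this pass saves each condition that is actually consumed into its
/// own GR8 virtual register with a SETcc ahead of the clobbering code, and
/// re-creates the needed flag with a TEST of that register immediately before
/// each use.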
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Analysis.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <array>
#include <cassert>
#include <iterator>
#include <memory>
#include <numeric>
#include <tuple>
#include <utility>

using namespace llvm;

#define PASS_KEY "x86-flags-copy-lowering"
#define DEBUG_TYPE PASS_KEY

STATISTIC(NumCopiesEliminated, "Number of copies of EFLAGS eliminated");
STATISTIC(NumSetCCsInserted, "Number of setCC instructions inserted");
STATISTIC(NumTestsInserted, "Number of test instructions inserted");
STATISTIC(NumAddsInserted, "Number of add instructions inserted");
STATISTIC(NumNFsConvertedTo, "Number of instructions converted to NF variants");

extern cl::opt<bool> X86EnableAPXForRelocation;

namespace {

// Convenient array type for storing registers associated with each condition.
using CondRegArray = std::array<Register, X86::LAST_VALID_COND + 1>;

class X86FlagsCopyLoweringImpl {
public:
  X86FlagsCopyLoweringImpl(MachineDominatorTree *MDT) : MDT(MDT) {}

  bool runOnMachineFunction(MachineFunction &MF);

private:
  MachineRegisterInfo *MRI = nullptr;
  const X86Subtarget *Subtarget = nullptr;
  const X86InstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetRegisterClass *PromoteRC = nullptr;
  MachineDominatorTree *MDT = nullptr;

  CondRegArray collectCondsInRegs(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator TestPos);

  Register promoteCondToReg(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator TestPos,
                            const DebugLoc &TestLoc, X86::CondCode Cond);
  std::pair<Register, bool> getCondOrInverseInReg(
      MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
      const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs);
  void insertTest(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
                  const DebugLoc &Loc, Register Reg);

  void rewriteSetCC(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
                    const DebugLoc &Loc, MachineInstr &MI,
                    CondRegArray &CondRegs);
  void rewriteArithmetic(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator Pos, const DebugLoc &Loc,
                         MachineInstr &MI, CondRegArray &CondRegs);
  void rewriteMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
                 const DebugLoc &Loc, MachineInstr &MI, CondRegArray &CondRegs);
};

class X86FlagsCopyLoweringLegacy : public MachineFunctionPass {
public:
  X86FlagsCopyLoweringLegacy() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 EFLAGS copy lowering"; }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Pass identification, replacement for typeid.
  static char ID;
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(X86FlagsCopyLoweringLegacy, DEBUG_TYPE,
                      "X86 EFLAGS copy lowering", false, false)
INITIALIZE_PASS_END(X86FlagsCopyLoweringLegacy, DEBUG_TYPE,
                    "X86 EFLAGS copy lowering", false, false)

FunctionPass *llvm::createX86FlagsCopyLoweringLegacyPass() {
  return new X86FlagsCopyLoweringLegacy();
}

char X86FlagsCopyLoweringLegacy::ID = 0;

void X86FlagsCopyLoweringLegacy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addUsedIfAvailable<MachineDominatorTreeWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

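// Opcodes whose EFLAGS use is a data dependence on CF: ADC, SBB, RCL, and RCR
// consume the carry flag as an input, and the SETB_C* pseudos materialize it.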
static bool isArithmeticOp(unsigned Opc) {
  return X86::isADC(Opc) || X86::isSBB(Opc) || X86::isRCL(Opc) ||
         X86::isRCR(Opc) || (Opc == X86::SETB_C32r || Opc == X86::SETB_C64r);
}

static MachineBasicBlock &splitBlock(MachineBasicBlock &MBB,
                                     MachineInstr &SplitI,
                                     const X86InstrInfo &TII) {
  MachineFunction &MF = *MBB.getParent();

  assert(SplitI.getParent() == &MBB &&
         "Split instruction must be in the split block!");
  assert(SplitI.isBranch() &&
         "Only designed to split a tail of branch instructions!");
  assert(X86::getCondFromBranch(SplitI) != X86::COND_INVALID &&
         "Must split on an actual jCC instruction!");

  // Dig out the previous instruction to the split point.
  MachineInstr &PrevI = *std::prev(SplitI.getIterator());
  assert(PrevI.isBranch() && "Must split after a branch!");
  assert(X86::getCondFromBranch(PrevI) != X86::COND_INVALID &&
         "Must split after an actual jCC instruction!");
  assert(!std::prev(PrevI.getIterator())->isTerminator() &&
         "Must only have this one terminator prior to the split!");

  // Grab the one successor edge that will stay in `MBB`.
  MachineBasicBlock &UnsplitSucc = *PrevI.getOperand(0).getMBB();

  // Analyze the original block to see if we are actually splitting an edge
  // into two edges. This can happen when we have multiple conditional jumps to
  // the same successor.
  bool IsEdgeSplit =
      std::any_of(SplitI.getIterator(), MBB.instr_end(),
                  [&](MachineInstr &MI) {
                    assert(MI.isTerminator() &&
                           "Should only have spliced terminators!");
                    return llvm::any_of(
                        MI.operands(), [&](MachineOperand &MOp) {
                          return MOp.isMBB() && MOp.getMBB() == &UnsplitSucc;
                        });
                  }) ||
      MBB.getFallThrough() == &UnsplitSucc;

  MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();

  // Insert the new block immediately after the current one. Any existing
  // fallthrough will be sunk into this new block anyways.
  MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);

  // Splice the tail of instructions into the new block.
  NewMBB.splice(NewMBB.end(), &MBB, SplitI.getIterator(), MBB.end());

  // Copy the necessary successors (and their probability info) into the new
  // block.
  for (auto SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI)
    if (IsEdgeSplit || *SI != &UnsplitSucc)
      NewMBB.copySuccessor(&MBB, SI);
  // Normalize the probabilities if we didn't end up splitting the edge.
  if (!IsEdgeSplit)
    NewMBB.normalizeSuccProbs();

  // Now replace all of the moved successors in the original block with the new
  // block. This will merge their probabilities.
  for (MachineBasicBlock *Succ : NewMBB.successors())
    if (Succ != &UnsplitSucc)
      MBB.replaceSuccessor(Succ, &NewMBB);

  // We should always end up replacing at least one successor.
  assert(MBB.isSuccessor(&NewMBB) &&
         "Failed to make the new block a successor!");

  // Now update all the PHIs.
  for (MachineBasicBlock *Succ : NewMBB.successors()) {
    for (MachineInstr &MI : *Succ) {
      if (!MI.isPHI())
        break;

      for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
           OpIdx += 2) {
        MachineOperand &OpV = MI.getOperand(OpIdx);
        MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
        assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
        if (OpMBB.getMBB() != &MBB)
          continue;

        // Replace the operand for unsplit successors.
        if (!IsEdgeSplit || Succ != &UnsplitSucc) {
          OpMBB.setMBB(&NewMBB);

          // We have to continue scanning as there may be multiple entries in
          // the PHI.
          continue;
        }

        // When we have split the edge append a new successor.
        MI.addOperand(MF, OpV);
        MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
        break;
      }
    }
  }

  return NewMBB;
}
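
// Classify how an instruction interacts with EFLAGS: either it leaves them
// alone, its def of them can be avoided by switching to an APX NF (no flags)
// variant, or it unavoidably clobbers them.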
enum EFLAGSClobber { NoClobber, EvitableClobber, InevitableClobber };

static EFLAGSClobber getClobberType(const MachineInstr &MI) {
  const MachineOperand *FlagDef =
      MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
  if (!FlagDef)
    return NoClobber;

  // For ADDrm/ADDmr instructions with relocation, we skip the optimization
  // that replaces non-NF with NF instructions. This keeps backward
  // compatibility with older linkers that lack APX relocation type support on
  // Linux.
  bool IsWithReloc =
      X86EnableAPXForRelocation ? false : isAddMemInstrWithRelocation(MI);

  if (FlagDef->isDead() && X86::getNFVariant(MI.getOpcode()) && !IsWithReloc)
    return EvitableClobber;

  return InevitableClobber;
}

bool X86FlagsCopyLoweringImpl::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** " << PASS_KEY << " : " << MF.getName()
                    << " **********\n");

  Subtarget = &MF.getSubtarget<X86Subtarget>();
  MRI = &MF.getRegInfo();
  TII = Subtarget->getInstrInfo();
  TRI = Subtarget->getRegisterInfo();
  PromoteRC = &X86::GR8RegClass;

  if (MF.empty())
    // Nothing to do for a degenerate empty function...
    return false;

  if (none_of(MRI->def_instructions(X86::EFLAGS), [](const MachineInstr &MI) {
        return MI.getOpcode() == TargetOpcode::COPY;
      }))
    return false;

  // We change the code, so we don't preserve the dominator tree anyway. If we
  // got a valid MDT from the pass manager, use that, otherwise construct one
  // now. This is an optimization that avoids unnecessary MDT construction for
  // functions that have no flag copies.
  std::unique_ptr<MachineDominatorTree> OwnedMDT;
  if (!MDT) {
    OwnedMDT = std::make_unique<MachineDominatorTree>(MF);
    MDT = OwnedMDT.get();
  }

  // Collect the copies in RPO so that when there are chains where a copy is in
  // turn copied again we visit the first one first. This ensures we can find
  // viable locations for testing the original EFLAGS that dominate all the
  // uses across complex CFGs.
  SmallSetVector<MachineInstr *, 4> Copies;
  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  for (MachineBasicBlock *MBB : RPOT)
    for (MachineInstr &MI : *MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          MI.getOperand(0).getReg() == X86::EFLAGS)
        Copies.insert(&MI);

  // Try to eliminate the copies by transforming the instructions between the
  // copy and the copy def to their NF (no flags update) variants, e.g.
  //
  //   %1:gr64 = COPY $eflags
  //   OP1 implicit-def dead $eflags
  //   $eflags = COPY %1
  //   OP2 cc, implicit $eflags
  //
  // ->
  //
  //   OP1_NF
  //   OP2 implicit $eflags
  if (Subtarget->hasNF()) {
    SmallSetVector<MachineInstr *, 4> RemovedCopies;
    // CopyIIt may be invalidated by removing copies.
    auto CopyIIt = Copies.begin(), CopyIEnd = Copies.end();
    while (CopyIIt != CopyIEnd) {
      auto NCopyIIt = std::next(CopyIIt);
      SmallSetVector<MachineInstr *, 4> EvitableClobbers;
      MachineInstr *CopyI = *CopyIIt;
      MachineOperand &VOp = CopyI->getOperand(1);
      MachineInstr *CopyDefI = MRI->getVRegDef(VOp.getReg());
      MachineBasicBlock *CopyIMBB = CopyI->getParent();
      MachineBasicBlock *CopyDefIMBB = CopyDefI->getParent();
      // Walk all basic blocks reachable in depth-first iteration on the inverse
      // CFG from CopyIMBB to CopyDefIMBB. These blocks are all the blocks that
      // may be executed between the execution of CopyDefIMBB and CopyIMBB. On
      // all execution paths, instructions from CopyDefI to CopyI (exclusive)
      // have to be NF-convertible if they clobber flags.
      for (auto BI = idf_begin(CopyIMBB), BE = idf_end(CopyDefIMBB); BI != BE;
           ++BI) {
        MachineBasicBlock *MBB = *BI;
        for (auto I = (MBB != CopyDefIMBB)
                          ? MBB->begin()
                          : std::next(MachineBasicBlock::iterator(CopyDefI)),
                  E = (MBB != CopyIMBB) ? MBB->end()
                                        : MachineBasicBlock::iterator(CopyI);
             I != E; ++I) {
          MachineInstr &MI = *I;
          EFLAGSClobber ClobberType = getClobberType(MI);
          if (ClobberType == NoClobber)
            continue;

          if (ClobberType == InevitableClobber)
            goto ProcessNextCopyI;

          assert(ClobberType == EvitableClobber && "unexpected workflow");
          EvitableClobbers.insert(&MI);
        }
      }
      // Convert evitable clobbers into NF variants and remove the copies.
      RemovedCopies.insert(CopyI);
      CopyI->eraseFromParent();
      if (MRI->use_nodbg_empty(CopyDefI->getOperand(0).getReg())) {
        RemovedCopies.insert(CopyDefI);
        CopyDefI->eraseFromParent();
      }
      ++NumCopiesEliminated;
      for (auto *Clobber : EvitableClobbers) {
        unsigned NewOpc = X86::getNFVariant(Clobber->getOpcode());
        assert(NewOpc && "evitable clobber must have a NF variant");
        Clobber->setDesc(TII->get(NewOpc));
        Clobber->removeOperand(
            Clobber->findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr)
                ->getOperandNo());
        ++NumNFsConvertedTo;
      }
      // Update liveins for basic blocks in the path.
      for (auto BI = idf_begin(CopyIMBB), BE = idf_end(CopyDefIMBB); BI != BE;
           ++BI)
        if (*BI != CopyDefIMBB)
          BI->addLiveIn(X86::EFLAGS);
    ProcessNextCopyI:
      CopyIIt = NCopyIIt;
    }
    Copies.set_subtract(RemovedCopies);
  }

  // For the remaining copies that cannot be eliminated by the NF transform, we
  // use setcc to preserve the flags in a GPR before OP1, and recheck its value
  // before using the flags, e.g.
  //
  //   %1:gr64 = COPY $eflags
  //   OP1 implicit-def dead $eflags
  //   $eflags = COPY %1
  //   OP2 cc, implicit $eflags
  //
  // ->
  //
  //   %1:gr8 = SETCCr cc, implicit $eflags
  //   OP1 implicit-def dead $eflags
  //   TEST8rr %1, %1, implicit-def $eflags
  //   OP2 ne, implicit $eflags
  for (MachineInstr *CopyI : Copies) {
    MachineBasicBlock &MBB = *CopyI->getParent();

    MachineOperand &VOp = CopyI->getOperand(1);
    assert(VOp.isReg() &&
           "The input to the copy for EFLAGS should always be a register!");
    MachineInstr &CopyDefI = *MRI->getVRegDef(VOp.getReg());
    if (CopyDefI.getOpcode() != TargetOpcode::COPY) {
      // FIXME: The big likely candidates here are PHI nodes. We could in theory
      // handle PHI nodes, but it gets really, really hard. Insanely hard. Hard
      // enough that it is probably better to change every other part of LLVM
      // to avoid creating them. The issue is that once we have PHIs we won't
      // know which original EFLAGS value we need to capture with our setCCs
      // below. The end result will be computing a complete set of setCCs that
      // we *might* want, computing them in every place where we copy *out* of
      // EFLAGS and then doing SSA formation on all of them to insert necessary
      // PHI nodes and consume those here. Then hoping that somehow we DCE the
      // unnecessary ones. This DCE seems very unlikely to be successful and so
      // we will almost certainly end up with a glut of dead setCC
      // instructions. Until we have a motivating test case and fail to avoid
      // it by changing other parts of LLVM's lowering, we refuse to handle
      // this complex case here.
      LLVM_DEBUG(
          dbgs() << "ERROR: Encountered unexpected def of an eflags copy: ";
          CopyDefI.dump());
      report_fatal_error(
          "Cannot lower EFLAGS copy unless it is defined in turn by a copy!");
    }

    auto Cleanup = make_scope_exit([&] {
      // All uses of the EFLAGS copy are now rewritten, kill the copy into
      // eflags and if dead the copy from.
      CopyI->eraseFromParent();
      if (MRI->use_empty(CopyDefI.getOperand(0).getReg()))
        CopyDefI.eraseFromParent();
      ++NumCopiesEliminated;
    });

    MachineOperand &DOp = CopyI->getOperand(0);
    assert(DOp.isDef() && "Expected register def!");
    assert(DOp.getReg() == X86::EFLAGS && "Unexpected copy def register!");
    if (DOp.isDead())
      continue;

    MachineBasicBlock *TestMBB = CopyDefI.getParent();
    auto TestPos = CopyDefI.getIterator();
    DebugLoc TestLoc = CopyDefI.getDebugLoc();

    LLVM_DEBUG(dbgs() << "Rewriting copy: "; CopyI->dump());

    // Walk up across live-in EFLAGS to find where they were actually def'ed.
    //
    // This copy's def may just be part of a region of blocks covered by
    // a single def of EFLAGS and we want to find the top of that region where
    // possible.
    //
    // This is essentially a search for a *candidate* reaching definition
    // location. We don't need to ever find the actual reaching definition here,
    // but we want to walk up the dominator tree to find the highest point which
    // would be viable for such a definition.
    auto HasEFLAGSClobber = [&](MachineBasicBlock::iterator Begin,
                                MachineBasicBlock::iterator End) {
      // Scan backwards as we expect these to be relatively short and often find
      // a clobber near the end.
      return llvm::any_of(
          llvm::reverse(llvm::make_range(Begin, End)), [&](MachineInstr &MI) {
            // Flag any instruction (other than the copy we are
            // currently rewriting) that defs EFLAGS.
            return &MI != CopyI &&
                   MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
          });
    };
    auto HasEFLAGSClobberPath = [&](MachineBasicBlock *BeginMBB,
                                    MachineBasicBlock *EndMBB) {
      assert(MDT->dominates(BeginMBB, EndMBB) &&
             "Only support paths down the dominator tree!");
      SmallPtrSet<MachineBasicBlock *, 4> Visited;
      SmallVector<MachineBasicBlock *, 4> Worklist;
      // We terminate at the beginning. No need to scan it.
      Visited.insert(BeginMBB);
      Worklist.push_back(EndMBB);
      do {
        auto *MBB = Worklist.pop_back_val();
        for (auto *PredMBB : MBB->predecessors()) {
          if (!Visited.insert(PredMBB).second)
            continue;
          if (HasEFLAGSClobber(PredMBB->begin(), PredMBB->end()))
            return true;
          // Enqueue this block to walk its predecessors.
          Worklist.push_back(PredMBB);
        }
      } while (!Worklist.empty());
      // No clobber found along a path from the begin to end.
      return false;
    };
    while (TestMBB->isLiveIn(X86::EFLAGS) && !TestMBB->pred_empty() &&
           !HasEFLAGSClobber(TestMBB->begin(), TestPos)) {
      // Find the nearest common dominator of the predecessors, as
      // that will be the best candidate to hoist into.
      MachineBasicBlock *HoistMBB =
          std::accumulate(std::next(TestMBB->pred_begin()), TestMBB->pred_end(),
                          *TestMBB->pred_begin(),
                          [&](MachineBasicBlock *LHS, MachineBasicBlock *RHS) {
                            return MDT->findNearestCommonDominator(LHS, RHS);
                          });

      // Now we need to scan all predecessors that may be reached along paths to
      // the hoist block. A clobber anywhere in any of these blocks blocks the
      // hoist. Note that this even handles loops because we require *no*
      // clobbers.
      if (HasEFLAGSClobberPath(HoistMBB, TestMBB))
        break;

      // We also need the terminators to not sneakily clobber flags.
      if (HasEFLAGSClobber(HoistMBB->getFirstTerminator()->getIterator(),
                           HoistMBB->instr_end()))
        break;

      // We found a viable location, hoist our test position to it.
      TestMBB = HoistMBB;
      TestPos = TestMBB->getFirstTerminator()->getIterator();
      // Clear the debug location as it would just be confusing after hoisting.
      TestLoc = DebugLoc();
    }
    LLVM_DEBUG({
      auto DefIt = llvm::find_if(
          llvm::reverse(llvm::make_range(TestMBB->instr_begin(), TestPos)),
          [&](MachineInstr &MI) {
            return MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
          });
      if (DefIt.base() != TestMBB->instr_begin()) {
        dbgs() << "  Using EFLAGS defined by: ";
        DefIt->dump();
      } else {
        dbgs() << "  Using live-in flags for BB:\n";
        TestMBB->dump();
      }
    });

    // While rewriting uses, we buffer jumps and rewrite them in a second pass
    // because doing so will perturb the CFG that we are walking to find the
    // uses in the first place.
    SmallVector<MachineInstr *, 4> JmpIs;

    // Gather the condition flags that have already been preserved in
    // registers. We do this from scratch each time as we expect there to be
    // very few of them and we expect to not revisit the same copy definition
    // many times. If either of those change sufficiently we could build a map
    // of these up front instead.
    CondRegArray CondRegs = collectCondsInRegs(*TestMBB, TestPos);

    // Collect the basic blocks we need to scan. Typically this will just be
    // a single basic block but we may have to scan multiple blocks if the
    // EFLAGS copy lives into successors.
    SmallVector<MachineBasicBlock *, 2> Blocks;
    SmallPtrSet<MachineBasicBlock *, 2> VisitedBlocks;
    Blocks.push_back(&MBB);

    do {
      MachineBasicBlock &UseMBB = *Blocks.pop_back_val();

      // Track if/when we find a kill of the flags in this block.
      bool FlagsKilled = false;

      // In most cases, we walk from the beginning to the end of the block. But
      // when the block is the same block as the copy is from, we will visit it
      // twice. The first time we start from the copy and go to the end. The
      // second time we start from the beginning and go to the copy. This lets
      // us handle copies inside of cycles.
      // FIXME: This loop is *super* confusing. This is at least in part
      // a symptom of all of this routine needing to be refactored into
      // documentable components. Once done, there may be a better way to write
      // this loop.
      for (auto MII = (&UseMBB == &MBB && !VisitedBlocks.count(&UseMBB))
                          ? std::next(CopyI->getIterator())
                          : UseMBB.instr_begin(),
                MIE = UseMBB.instr_end();
           MII != MIE;) {
        MachineInstr &MI = *MII++;
        // If we are in the original copy block and encounter either the copy
        // def or the copy itself, break so that we don't re-process any part of
        // the block or process the instructions in the range that was copied
        // over.
        if (&MI == CopyI || &MI == &CopyDefI) {
          assert(&UseMBB == &MBB && VisitedBlocks.count(&MBB) &&
                 "Should only encounter these on the second pass over the "
                 "original block.");
          break;
        }

        MachineOperand *FlagUse =
            MI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr);
        FlagsKilled = MI.modifiesRegister(X86::EFLAGS, TRI);

        if (!FlagUse && FlagsKilled)
          break;
        else if (!FlagUse)
          continue;

        LLVM_DEBUG(dbgs() << "  Rewriting use: "; MI.dump());

        // Check the kill flag before we rewrite as that may change it.
        if (FlagUse->isKill())
          FlagsKilled = true;

        // Once we encounter a branch, the rest of the instructions must also be
        // branches. We can't rewrite in place here, so we handle them below.
        //
        // Note that we don't have to handle tail calls here, even conditional
        // tail calls, as those are not introduced into the X86 MI until post-RA
        // branch folding or block placement. As a consequence, we get to deal
        // with the simpler formulation of conditional branches followed by tail
        // calls.
        if (X86::getCondFromBranch(MI) != X86::COND_INVALID) {
          auto JmpIt = MI.getIterator();
          do {
            JmpIs.push_back(&*JmpIt);
            ++JmpIt;
          } while (JmpIt != UseMBB.instr_end() &&
                   X86::getCondFromBranch(*JmpIt) != X86::COND_INVALID);
          break;
        }

        // Otherwise we can just rewrite in-place.
        unsigned Opc = MI.getOpcode();
        if (Opc == TargetOpcode::COPY) {
          // Just replace this copy with the original copy def.
          MRI->replaceRegWith(MI.getOperand(0).getReg(),
                              CopyDefI.getOperand(0).getReg());
          MI.eraseFromParent();
        } else if (X86::isSETCC(Opc) || X86::isSETZUCC(Opc)) {
          rewriteSetCC(*TestMBB, TestPos, TestLoc, MI, CondRegs);
        } else if (isArithmeticOp(Opc)) {
          rewriteArithmetic(*TestMBB, TestPos, TestLoc, MI, CondRegs);
        } else {
          rewriteMI(*TestMBB, TestPos, TestLoc, MI, CondRegs);
        }

        // If this was the last use of the flags, we're done.
        if (FlagsKilled)
          break;
      }

      // If the flags were killed, we're done with this block.
      if (FlagsKilled)
        continue;

      // Otherwise we need to scan successors for ones where the flags live-in
      // and queue those up for processing.
      for (MachineBasicBlock *SuccMBB : UseMBB.successors())
        if (SuccMBB->isLiveIn(X86::EFLAGS) &&
            VisitedBlocks.insert(SuccMBB).second) {
          // We currently don't do any PHI insertion and so we require that the
          // test basic block dominates all of the use basic blocks. Further, we
          // can't have a cycle from the test block back to itself as that would
          // create a cycle requiring a PHI to break it.
          //
          // We could in theory do PHI insertion here if it becomes useful by
          // just taking undef values in along every edge that we don't trace
          // this EFLAGS copy along. This isn't as bad as fully general PHI
          // insertion, but still seems like a great deal of complexity.
          //
          // Because it is theoretically possible that some earlier MI pass or
          // other lowering transformation could induce this to happen, we do
          // a hard check even in non-debug builds here.
          if (SuccMBB == TestMBB || !MDT->dominates(TestMBB, SuccMBB)) {
            LLVM_DEBUG({
              dbgs()
                  << "ERROR: Encountered use that is not dominated by our test "
                     "basic block! Rewriting this would require inserting PHI "
                     "nodes to track the flag state across the CFG.\n\nTest "
                     "block:\n";
              TestMBB->dump();
              dbgs() << "Use block:\n";
              SuccMBB->dump();
            });
            report_fatal_error(
                "Cannot lower EFLAGS copy when original copy def "
                "does not dominate all uses.");
          }

          Blocks.push_back(SuccMBB);

          // After this, EFLAGS will be recreated before each use.
          SuccMBB->removeLiveIn(X86::EFLAGS);
        }
    } while (!Blocks.empty());

    // Now rewrite the jumps that use the flags. These we handle specially
    // because if there are multiple jumps in a single basic block we'll have
    // to do surgery on the CFG.
    MachineBasicBlock *LastJmpMBB = nullptr;
    for (MachineInstr *JmpI : JmpIs) {
      // Past the first jump within a basic block we need to split the blocks
      // apart.
      if (JmpI->getParent() == LastJmpMBB)
        splitBlock(*JmpI->getParent(), *JmpI, *TII);
      else
        LastJmpMBB = JmpI->getParent();

      rewriteMI(*TestMBB, TestPos, TestLoc, *JmpI, CondRegs);
    }

    // FIXME: Mark the last use of EFLAGS before the copy's def as a kill if
    // the copy's def operand is itself a kill.
  }

#ifndef NDEBUG
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          (MI.getOperand(0).getReg() == X86::EFLAGS ||
           MI.getOperand(1).getReg() == X86::EFLAGS)) {
        LLVM_DEBUG(dbgs() << "ERROR: Found a COPY involving EFLAGS: ";
                   MI.dump());
        llvm_unreachable("Unlowered EFLAGS copy!");
      }
#endif

  return true;
}

/// Collect any conditions that have already been set in registers so that we
/// can re-use them rather than adding duplicates.
CondRegArray X86FlagsCopyLoweringImpl::collectCondsInRegs(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator TestPos) {
  CondRegArray CondRegs = {};

  // Scan backwards across the range of instructions with live EFLAGS.
  for (MachineInstr &MI :
       llvm::reverse(llvm::make_range(MBB.begin(), TestPos))) {
    X86::CondCode Cond = X86::getCondFromSETCC(MI);
    if (Cond != X86::COND_INVALID && !MI.mayStore() &&
        MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual()) {
      assert(MI.getOperand(0).isDef() &&
             "A non-storing SETcc should always define a register!");
      CondRegs[Cond] = MI.getOperand(0).getReg();
    }

    // Stop scanning when we see the first definition of the EFLAGS as prior to
    // this we would potentially capture the wrong flag state.
    if (MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr))
      break;
  }
  return CondRegs;
}

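/// Save the given condition into a fresh GR8 virtual register with a SETcc (or
/// a SETZUCC when the subtarget has zero-upper SETcc and it is preferred).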
Register X86FlagsCopyLoweringImpl::promoteCondToReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, X86::CondCode Cond) {
  Register Reg = MRI->createVirtualRegister(PromoteRC);
  auto SetI =
      BuildMI(TestMBB, TestPos, TestLoc,
              TII->get((!Subtarget->hasZU() || Subtarget->preferLegacySetCC())
                           ? X86::SETCCr
                           : X86::SETZUCCr),
              Reg)
          .addImm(Cond);
  (void)SetI;
  LLVM_DEBUG(dbgs() << "  save cond: "; SetI->dump());
  ++NumSetCCsInserted;
  return Reg;
}

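/// Return a register holding the given condition, or one holding its inverse
/// together with a true flag; materializes the direct condition if neither
/// has been saved yet.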
std::pair<Register, bool> X86FlagsCopyLoweringImpl::getCondOrInverseInReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs) {
  Register &CondReg = CondRegs[Cond];
  Register &InvCondReg = CondRegs[X86::GetOppositeBranchCondition(Cond)];
  if (!CondReg && !InvCondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  if (CondReg)
    return {CondReg, false};
  else
    return {InvCondReg, true};
}

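/// Insert a TEST of the saved condition byte so that ZF is clear exactly when
/// the original condition held.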
void X86FlagsCopyLoweringImpl::insertTest(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator Pos,
                                          const DebugLoc &Loc, Register Reg) {
  auto TestI =
      BuildMI(MBB, Pos, Loc, TII->get(X86::TEST8rr)).addReg(Reg).addReg(Reg);
  (void)TestI;
  LLVM_DEBUG(dbgs() << "  test cond: "; TestI->dump());
  ++NumTestsInserted;
}

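/// Rewrite a SETcc that consumed the copied EFLAGS to read the saved condition
/// register instead, either replacing its def directly or storing the byte for
/// the memory form.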
void X86FlagsCopyLoweringImpl::rewriteSetCC(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator Pos,
                                            const DebugLoc &Loc,
                                            MachineInstr &MI,
                                            CondRegArray &CondRegs) {
  X86::CondCode Cond = X86::getCondFromSETCC(MI);
  // Note that we can't usefully rewrite this to the inverse without complex
  // analysis of the users of the setCC. Largely we rely on duplicates which
  // could have been avoided already being avoided here.
  Register &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(MBB, Pos, Loc, Cond);

  // Rewriting a register def is trivial: we just replace the register and
  // remove the setcc.
  if (!MI.mayStore()) {
    assert(MI.getOperand(0).isReg() &&
           "Cannot have a non-register defined operand to SETcc!");
    Register OldReg = MI.getOperand(0).getReg();
    // Drop Kill flags on the old register before replacing. CondReg may have
    // a longer live range.
    MRI->clearKillFlags(OldReg);
    MRI->replaceRegWith(OldReg, CondReg);
    MI.eraseFromParent();
    return;
  }

  // Otherwise, we need to emit a store.
  auto MIB = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                     TII->get(X86::MOV8mr));
  // Copy the address operands.
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.add(MI.getOperand(i));

  MIB.addReg(CondReg);
  MIB.setMemRefs(MI.memoperands());
  MI.eraseFromParent();
}

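/// Rewrite an instruction that consumes CF as data (ADC, SBB, RCL, RCR) by
/// rematerializing CF from the saved condition register immediately before it.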
void X86FlagsCopyLoweringImpl::rewriteArithmetic(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
    const DebugLoc &Loc, MachineInstr &MI, CondRegArray &CondRegs) {
  // Arithmetic is either reading CF or OF.
  X86::CondCode Cond = X86::COND_B; // CF == 1
  // The addend to use to reset CF or OF when added to the flag value.
  // Set up an addend that when one is added will need a carry due to not
  // having a higher bit available.
  int Addend = 255;
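  // The saved CondReg is 0 or 1, so the ADD below computes 255 + 1 = 256 and
  // overflows eight bits (setting CF) exactly when the original CF was set.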

  // Now get a register that contains the value of the flag input to the
  // arithmetic. We require exactly this flag to simplify the arithmetic
  // required to materialize it back into the flag.
  Register &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(MBB, Pos, Loc, Cond);

  // Insert an instruction that will set the flag back to the desired value.
  Register TmpReg = MRI->createVirtualRegister(PromoteRC);
  auto AddI =
      BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
              TII->get(Subtarget->hasNDD() ? X86::ADD8ri_ND : X86::ADD8ri))
          .addDef(TmpReg, RegState::Dead)
          .addReg(CondReg)
          .addImm(Addend);
  (void)AddI;
  LLVM_DEBUG(dbgs() << "  add cond: "; AddI->dump());
  ++NumAddsInserted;
  MI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true);
}

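/// Map an X87 floating-point CMOV pseudo to the condition code it implicitly
/// uses, or COND_INVALID for any other opcode.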
static X86::CondCode getImplicitCondFromMI(unsigned Opc) {
#define FROM_TO(A, B)                                                          \
  case X86::CMOV##A##_Fp32:                                                    \
  case X86::CMOV##A##_Fp64:                                                    \
  case X86::CMOV##A##_Fp80:                                                    \
    return X86::COND_##B;

  switch (Opc) {
  default:
    return X86::COND_INVALID;
    FROM_TO(B, B)
    FROM_TO(E, E)
    FROM_TO(P, P)
    FROM_TO(BE, BE)
    FROM_TO(NB, AE)
    FROM_TO(NE, NE)
    FROM_TO(NP, NP)
    FROM_TO(NBE, A)
  }
#undef FROM_TO
}

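/// Rewrite an X87 FP CMOV opcode to the equivalent form using COND_E or
/// COND_NE, matching the ZF-based test inserted by rewriteMI.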
static unsigned getOpcodeWithCC(unsigned Opc, X86::CondCode CC) {
  assert((CC == X86::COND_E || CC == X86::COND_NE) && "Unexpected CC");
#define CASE(A)                                                                \
  case X86::CMOVB_##A:                                                         \
  case X86::CMOVE_##A:                                                         \
  case X86::CMOVP_##A:                                                         \
  case X86::CMOVBE_##A:                                                        \
  case X86::CMOVNB_##A:                                                        \
  case X86::CMOVNE_##A:                                                        \
  case X86::CMOVNP_##A:                                                        \
  case X86::CMOVNBE_##A:                                                       \
    return (CC == X86::COND_E) ? X86::CMOVE_##A : X86::CMOVNE_##A;
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode");
    CASE(Fp32)
    CASE(Fp64)
    CASE(Fp80)
  }
#undef CASE
}

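/// Generic rewrite for an EFLAGS-consuming instruction (Jcc, CMOV, etc.):
/// insert a TEST of the saved condition byte and retarget the instruction to
/// the ZF it produces.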
void X86FlagsCopyLoweringImpl::rewriteMI(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator Pos,
                                         const DebugLoc &Loc, MachineInstr &MI,
                                         CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  bool IsImplicitCC = false;
  X86::CondCode CC = X86::getCondFromMI(MI);
  if (CC == X86::COND_INVALID) {
    CC = getImplicitCondFromMI(MI.getOpcode());
    IsImplicitCC = true;
  }
  assert(CC != X86::COND_INVALID && "Unknown EFLAG user!");
  Register CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(MBB, Pos, Loc, CC, CondRegs);

  // Insert a direct test of the saved register.
  insertTest(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(), CondReg);

  // Rewrite the instruction to use the !ZF flag from the test, and then kill
  // its use of the flags afterward.
  X86::CondCode NewCC = Inverted ? X86::COND_E : X86::COND_NE;
  if (IsImplicitCC)
    MI.setDesc(TII->get(getOpcodeWithCC(MI.getOpcode(), NewCC)));
  else
    MI.getOperand(MI.getDesc().getNumOperands() - 1).setImm(NewCC);

  MI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true);
  LLVM_DEBUG(dbgs() << "  fixed instruction: "; MI.dump());
}

bool X86FlagsCopyLoweringLegacy::runOnMachineFunction(MachineFunction &MF) {
  auto *MDTWrapper = getAnalysisIfAvailable<MachineDominatorTreeWrapperPass>();
  MachineDominatorTree *MDT = MDTWrapper ? &MDTWrapper->getDomTree() : nullptr;
  return X86FlagsCopyLoweringImpl(MDT).runOnMachineFunction(MF);
}

PreservedAnalyses
X86FlagsCopyLoweringPass::run(MachineFunction &MF,
                              MachineFunctionAnalysisManager &MFAM) {
  MachineDominatorTree *MDT =
      MFAM.getCachedResult<MachineDominatorTreeAnalysis>(MF);
  bool Changed = X86FlagsCopyLoweringImpl(MDT).runOnMachineFunction(MF);
  return Changed ? getMachineFunctionPassPreservedAnalyses()
                 : PreservedAnalyses::all();
}