X86FlagsCopyLowering.cpp
//====- X86FlagsCopyLowering.cpp - Lowers COPY nodes of EFLAGS ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Lowers COPY nodes of EFLAGS by directly extracting and preserving individual
/// flag bits.
///
/// We have to do this by carefully analyzing and rewriting the usage of the
/// copied EFLAGS register because there is no general way to rematerialize the
/// entire EFLAGS register safely and efficiently. Using `popf` both forces
/// dynamic stack adjustment and can create correctness issues due to IF, TF,
/// and other non-status flags being overwritten. Sequences involving SAHF
/// don't work on all x86 processors and are often quite slow compared to
/// directly testing a single status flag preserved in its own GPR.
///
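/// As an illustrative sketch (hand-written for this comment, not taken from a
/// real test case), a pair of copies such as:
///
///     %flags:gr64 = COPY $eflags
///     ...
///     $eflags = COPY %flags
///     JCC_1 %bb.target, 4 /* COND_E */
///
/// is lowered by this pass into roughly:
///
///     %zf:gr8 = SETCCr 4 /* COND_E */, implicit $eflags
///     ...
///     TEST8rr %zf, %zf, implicit-def $eflags
///     JCC_1 %bb.target, 5 /* COND_NE */
///
/// Each status flag that is actually consumed is saved into its own GPR with
/// a setCC at the original def point and re-tested at each use.
///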
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define PASS_KEY "x86-flags-copy-lowering"
#define DEBUG_TYPE PASS_KEY

STATISTIC(NumCopiesEliminated, "Number of copies of EFLAGS eliminated");
STATISTIC(NumSetCCsInserted, "Number of setCC instructions inserted");
STATISTIC(NumTestsInserted, "Number of test instructions inserted");
STATISTIC(NumAddsInserted, "Number of adds instructions inserted");

namespace {

// Convenient array type for storing registers associated with each condition.
using CondRegArray = std::array<unsigned, X86::LAST_VALID_COND + 1>;

class X86FlagsCopyLoweringPass : public MachineFunctionPass {
public:
  X86FlagsCopyLoweringPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 EFLAGS copy lowering"; }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Pass identification, replacement for typeid.
  static char ID;

private:
  MachineRegisterInfo *MRI = nullptr;
  const X86Subtarget *Subtarget = nullptr;
  const X86InstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetRegisterClass *PromoteRC = nullptr;
  MachineDominatorTree *MDT = nullptr;

  CondRegArray collectCondsInRegs(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator TestPos);

  Register promoteCondToReg(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator TestPos,
                            const DebugLoc &TestLoc, X86::CondCode Cond);
  std::pair<unsigned, bool> getCondOrInverseInReg(
      MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
      const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs);
  void insertTest(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
                  const DebugLoc &Loc, unsigned Reg);

  void rewriteArithmetic(MachineBasicBlock &TestMBB,
                         MachineBasicBlock::iterator TestPos,
                         const DebugLoc &TestLoc, MachineInstr &MI,
                         MachineOperand &FlagUse, CondRegArray &CondRegs);
  void rewriteCMov(MachineBasicBlock &TestMBB,
                   MachineBasicBlock::iterator TestPos, const DebugLoc &TestLoc,
                   MachineInstr &CMovI, MachineOperand &FlagUse,
                   CondRegArray &CondRegs);
  void rewriteFCMov(MachineBasicBlock &TestMBB,
                    MachineBasicBlock::iterator TestPos,
                    const DebugLoc &TestLoc, MachineInstr &CMovI,
                    MachineOperand &FlagUse, CondRegArray &CondRegs);
  void rewriteCondJmp(MachineBasicBlock &TestMBB,
                      MachineBasicBlock::iterator TestPos,
                      const DebugLoc &TestLoc, MachineInstr &JmpI,
                      CondRegArray &CondRegs);
  void rewriteCopy(MachineInstr &MI, MachineOperand &FlagUse,
                   MachineInstr &CopyDefI);
  void rewriteSetCC(MachineBasicBlock &TestMBB,
                    MachineBasicBlock::iterator TestPos,
                    const DebugLoc &TestLoc, MachineInstr &SetCCI,
                    MachineOperand &FlagUse, CondRegArray &CondRegs);
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(X86FlagsCopyLoweringPass, DEBUG_TYPE,
                      "X86 EFLAGS copy lowering", false, false)
INITIALIZE_PASS_END(X86FlagsCopyLoweringPass, DEBUG_TYPE,
                    "X86 EFLAGS copy lowering", false, false)

FunctionPass *llvm::createX86FlagsCopyLoweringPass() {
  return new X86FlagsCopyLoweringPass();
}

char X86FlagsCopyLoweringPass::ID = 0;

void X86FlagsCopyLoweringPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<MachineDominatorTree>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

namespace {
/// An enumeration of the arithmetic instruction mnemonics which have
/// interesting flag semantics.
///
/// We can map instruction opcodes into these mnemonics to make it easy to
/// dispatch with specific functionality.
enum class FlagArithMnemonic {
  ADC,
  RCL,
  RCR,
  SBB,
  SETB,
};
} // namespace

static FlagArithMnemonic getMnemonicFromOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    report_fatal_error("No support for lowering a copy into EFLAGS when used "
                       "by this instruction!");

#define CASE_ND(OP) \
  case X86::OP: \
  case X86::OP##_ND:

#define LLVM_EXPAND_INSTR_SIZES(MNEMONIC, SUFFIX) \
  CASE_ND(MNEMONIC##8##SUFFIX) \
  CASE_ND(MNEMONIC##16##SUFFIX) \
  CASE_ND(MNEMONIC##32##SUFFIX) \
  CASE_ND(MNEMONIC##64##SUFFIX)

#define LLVM_EXPAND_ADC_SBB_INSTR(MNEMONIC) \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rr) \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rm) \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, mr) \
  CASE_ND(MNEMONIC##8ri) \
  CASE_ND(MNEMONIC##16ri8) \
  CASE_ND(MNEMONIC##32ri8) \
  CASE_ND(MNEMONIC##64ri8) \
  CASE_ND(MNEMONIC##16ri) \
  CASE_ND(MNEMONIC##32ri) \
  CASE_ND(MNEMONIC##64ri32) \
  CASE_ND(MNEMONIC##8mi) \
  CASE_ND(MNEMONIC##16mi8) \
  CASE_ND(MNEMONIC##32mi8) \
  CASE_ND(MNEMONIC##64mi8) \
  CASE_ND(MNEMONIC##16mi) \
  CASE_ND(MNEMONIC##32mi) \
  CASE_ND(MNEMONIC##64mi32) \
  case X86::MNEMONIC##8i8: \
  case X86::MNEMONIC##16i16: \
  case X86::MNEMONIC##32i32: \
  case X86::MNEMONIC##64i32:

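// An editorial illustration (expansion written out by hand, not part of the
// original source): `LLVM_EXPAND_INSTR_SIZES(ADC, rr)` expands via CASE_ND to
//
//   case X86::ADC8rr: case X86::ADC8rr_ND:
//   case X86::ADC16rr: case X86::ADC16rr_ND:
//   case X86::ADC32rr: case X86::ADC32rr_ND:
//   case X86::ADC64rr: case X86::ADC64rr_ND:
//
// so a single LLVM_EXPAND_ADC_SBB_INSTR(ADC) covers every width and operand
// form of ADC, including the EVEX new-data-destination (_ND) variants.
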
    LLVM_EXPAND_ADC_SBB_INSTR(ADC)
    return FlagArithMnemonic::ADC;

    LLVM_EXPAND_ADC_SBB_INSTR(SBB)
    return FlagArithMnemonic::SBB;

#undef LLVM_EXPAND_ADC_SBB_INSTR

    LLVM_EXPAND_INSTR_SIZES(RCL, rCL)
    LLVM_EXPAND_INSTR_SIZES(RCL, r1)
    LLVM_EXPAND_INSTR_SIZES(RCL, ri)
    return FlagArithMnemonic::RCL;

    LLVM_EXPAND_INSTR_SIZES(RCR, rCL)
    LLVM_EXPAND_INSTR_SIZES(RCR, r1)
    LLVM_EXPAND_INSTR_SIZES(RCR, ri)
    return FlagArithMnemonic::RCR;

#undef LLVM_EXPAND_INSTR_SIZES
#undef CASE_ND

  case X86::SETB_C32r:
  case X86::SETB_C64r:
    return FlagArithMnemonic::SETB;
  }
}

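// An editorial sketch (not from the original source) of what splitBlock does.
// Given a block whose two trailing conditional jumps target the same
// successor:
//
//   bb.0:
//     ...
//     JCC_1 %bb.1, 4
//     JCC_1 %bb.1, 2   <-- SplitI
//
// the tail starting at SplitI is spliced into a fresh bb.2 inserted right
// after bb.0, the duplicated bb.0 -> bb.1 edge is rerouted through bb.2, and
// any PHIs in bb.1 gain an operand pair for the new predecessor.
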
static MachineBasicBlock &splitBlock(MachineBasicBlock &MBB,
                                     MachineInstr &SplitI,
                                     const X86InstrInfo &TII) {
  MachineFunction &MF = *MBB.getParent();

  assert(SplitI.getParent() == &MBB &&
         "Split instruction must be in the split block!");
  assert(SplitI.isBranch() &&
         "Only designed to split a tail of branch instructions!");
  assert(X86::getCondFromBranch(SplitI) != X86::COND_INVALID &&
         "Must split on an actual jCC instruction!");

  // Dig out the previous instruction to the split point.
  MachineInstr &PrevI = *std::prev(SplitI.getIterator());
  assert(PrevI.isBranch() && "Must split after a branch!");
  assert(X86::getCondFromBranch(PrevI) != X86::COND_INVALID &&
         "Must split after an actual jCC instruction!");
  assert(!std::prev(PrevI.getIterator())->isTerminator() &&
         "Must only have this one terminator prior to the split!");

  // Grab the one successor edge that will stay in `MBB`.
  MachineBasicBlock &UnsplitSucc = *PrevI.getOperand(0).getMBB();

  // Analyze the original block to see if we are actually splitting an edge
  // into two edges. This can happen when we have multiple conditional jumps to
  // the same successor.
  bool IsEdgeSplit =
      std::any_of(SplitI.getIterator(), MBB.instr_end(),
                  [&](MachineInstr &MI) {
                    assert(MI.isTerminator() &&
                           "Should only have spliced terminators!");
                    return llvm::any_of(
                        MI.operands(), [&](MachineOperand &MOp) {
                          return MOp.isMBB() && MOp.getMBB() == &UnsplitSucc;
                        });
                  }) ||
      MBB.getFallThrough() == &UnsplitSucc;

  MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();

  // Insert the new block immediately after the current one. Any existing
  // fallthrough will be sunk into this new block anyways.
  MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);

  // Splice the tail of instructions into the new block.
  NewMBB.splice(NewMBB.end(), &MBB, SplitI.getIterator(), MBB.end());

  // Copy the necessary successors (and their probability info) into the new
  // block.
  for (auto SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI)
    if (IsEdgeSplit || *SI != &UnsplitSucc)
      NewMBB.copySuccessor(&MBB, SI);
  // Normalize the probabilities if we didn't end up splitting the edge.
  if (!IsEdgeSplit)
    NewMBB.normalizeSuccProbs();

  // Now replace all of the moved successors in the original block with the new
  // block. This will merge their probabilities.
  for (MachineBasicBlock *Succ : NewMBB.successors())
    if (Succ != &UnsplitSucc)
      MBB.replaceSuccessor(Succ, &NewMBB);

  // We should always end up replacing at least one successor.
  assert(MBB.isSuccessor(&NewMBB) &&
         "Failed to make the new block a successor!");

  // Now update all the PHIs.
  for (MachineBasicBlock *Succ : NewMBB.successors()) {
    for (MachineInstr &MI : *Succ) {
      if (!MI.isPHI())
        break;

      for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
           OpIdx += 2) {
        MachineOperand &OpV = MI.getOperand(OpIdx);
        MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
        assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
        if (OpMBB.getMBB() != &MBB)
          continue;

        // Replace the operand for unsplit successors.
        if (!IsEdgeSplit || Succ != &UnsplitSucc) {
          OpMBB.setMBB(&NewMBB);

          // We have to continue scanning as there may be multiple entries in
          // the PHI.
          continue;
        }

        // When we have split the edge, append a new successor.
        MI.addOperand(MF, OpV);
        MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
        break;
      }
    }
  }

  return NewMBB;
}

static X86::CondCode getCondFromFCMOV(unsigned Opcode) {
  switch (Opcode) {
  default: return X86::COND_INVALID;
  case X86::CMOVBE_Fp32: case X86::CMOVBE_Fp64: case X86::CMOVBE_Fp80:
    return X86::COND_BE;
  case X86::CMOVB_Fp32: case X86::CMOVB_Fp64: case X86::CMOVB_Fp80:
    return X86::COND_B;
  case X86::CMOVE_Fp32: case X86::CMOVE_Fp64: case X86::CMOVE_Fp80:
    return X86::COND_E;
  case X86::CMOVNBE_Fp32: case X86::CMOVNBE_Fp64: case X86::CMOVNBE_Fp80:
    return X86::COND_A;
  case X86::CMOVNB_Fp32: case X86::CMOVNB_Fp64: case X86::CMOVNB_Fp80:
    return X86::COND_AE;
  case X86::CMOVNE_Fp32: case X86::CMOVNE_Fp64: case X86::CMOVNE_Fp80:
    return X86::COND_NE;
  case X86::CMOVNP_Fp32: case X86::CMOVNP_Fp64: case X86::CMOVNP_Fp80:
    return X86::COND_NP;
  case X86::CMOVP_Fp32: case X86::CMOVP_Fp64: case X86::CMOVP_Fp80:
    return X86::COND_P;
  }
}

bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                    << " **********\n");

  Subtarget = &MF.getSubtarget<X86Subtarget>();
  MRI = &MF.getRegInfo();
  TII = Subtarget->getInstrInfo();
  TRI = Subtarget->getRegisterInfo();
  MDT = &getAnalysis<MachineDominatorTree>();
  PromoteRC = &X86::GR8RegClass;

  if (MF.begin() == MF.end())
    // Nothing to do for a degenerate empty function...
    return false;

  // Collect the copies in RPO so that when there are chains where a copy is in
  // turn copied again we visit the first one first. This ensures we can find
  // viable locations for testing the original EFLAGS that dominate all the
  // uses across complex CFGs.
  SmallVector<MachineInstr *, 4> Copies;
  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  for (MachineBasicBlock *MBB : RPOT)
    for (MachineInstr &MI : *MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          MI.getOperand(0).getReg() == X86::EFLAGS)
        Copies.push_back(&MI);

  for (MachineInstr *CopyI : Copies) {
    MachineBasicBlock &MBB = *CopyI->getParent();

    MachineOperand &VOp = CopyI->getOperand(1);
    assert(VOp.isReg() &&
           "The input to the copy for EFLAGS should always be a register!");
    MachineInstr &CopyDefI = *MRI->getVRegDef(VOp.getReg());
    if (CopyDefI.getOpcode() != TargetOpcode::COPY) {
      // FIXME: The big likely candidates here are PHI nodes. We could in theory
      // handle PHI nodes, but it gets really, really hard. Insanely hard. Hard
      // enough that it is probably better to change every other part of LLVM
      // to avoid creating them. The issue is that once we have PHIs we won't
      // know which original EFLAGS value we need to capture with our setCCs
      // below. The end result will be computing a complete set of setCCs that
      // we *might* want, computing them in every place where we copy *out* of
      // EFLAGS and then doing SSA formation on all of them to insert necessary
      // PHI nodes and consume those here. Then hoping that somehow we DCE the
      // unnecessary ones. This DCE seems very unlikely to be successful and so
      // we will almost certainly end up with a glut of dead setCC
      // instructions. Until we have a motivating test case and fail to avoid
      // it by changing other parts of LLVM's lowering, we refuse to handle
      // this complex case here.
      LLVM_DEBUG(
          dbgs() << "ERROR: Encountered unexpected def of an eflags copy: ";
          CopyDefI.dump());
      report_fatal_error(
          "Cannot lower EFLAGS copy unless it is defined in turn by a copy!");
    }

    auto Cleanup = make_scope_exit([&] {
      // All uses of the EFLAGS copy are now rewritten, kill the copy into
      // eflags and if dead the copy from.
      CopyI->eraseFromParent();
      if (MRI->use_empty(CopyDefI.getOperand(0).getReg()))
        CopyDefI.eraseFromParent();
      ++NumCopiesEliminated;
    });

    MachineOperand &DOp = CopyI->getOperand(0);
    assert(DOp.isDef() && "Expected register def!");
    assert(DOp.getReg() == X86::EFLAGS && "Unexpected copy def register!");
    if (DOp.isDead())
      continue;

    MachineBasicBlock *TestMBB = CopyDefI.getParent();
    auto TestPos = CopyDefI.getIterator();
    DebugLoc TestLoc = CopyDefI.getDebugLoc();

    LLVM_DEBUG(dbgs() << "Rewriting copy: "; CopyI->dump());

    // Walk up across live-in EFLAGS to find where they were actually def'ed.
    //
    // This copy's def may just be part of a region of blocks covered by
    // a single def of EFLAGS and we want to find the top of that region where
    // possible.
    //
    // This is essentially a search for a *candidate* reaching definition
    // location. We don't need to ever find the actual reaching definition here,
    // but we want to walk up the dominator tree to find the highest point which
    // would be viable for such a definition.
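    //
    // For intuition, an editorial example (not from the original source): if
    // EFLAGS is defined in bb.0, which conditionally branches to bb.1 or bb.2,
    // and both fall into bb.3 holding the copy's def, then bb.1, bb.2, and
    // bb.3 all have EFLAGS live-in, and the walk below hoists the test
    // position from bb.3 up to the end of bb.0: the nearest common dominator
    // with no EFLAGS clobber along any intervening path.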
    auto HasEFLAGSClobber = [&](MachineBasicBlock::iterator Begin,
                                MachineBasicBlock::iterator End) {
      // Scan backwards as we expect these to be relatively short and often find
      // a clobber near the end.
      return llvm::any_of(
          llvm::reverse(llvm::make_range(Begin, End)), [&](MachineInstr &MI) {
            // Flag any instruction (other than the copy we are
            // currently rewriting) that defs EFLAGS.
            return &MI != CopyI &&
                   MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
          });
    };
    auto HasEFLAGSClobberPath = [&](MachineBasicBlock *BeginMBB,
                                    MachineBasicBlock *EndMBB) {
      assert(MDT->dominates(BeginMBB, EndMBB) &&
             "Only support paths down the dominator tree!");
      SmallPtrSet<MachineBasicBlock *, 4> Visited;
      SmallVector<MachineBasicBlock *, 4> Worklist;
      // We terminate at the beginning. No need to scan it.
      Visited.insert(BeginMBB);
      Worklist.push_back(EndMBB);
      do {
        auto *MBB = Worklist.pop_back_val();
        for (auto *PredMBB : MBB->predecessors()) {
          if (!Visited.insert(PredMBB).second)
            continue;
          if (HasEFLAGSClobber(PredMBB->begin(), PredMBB->end()))
            return true;
          // Enqueue this block to walk its predecessors.
          Worklist.push_back(PredMBB);
        }
      } while (!Worklist.empty());
      // No clobber found along a path from the begin to end.
      return false;
    };
    while (TestMBB->isLiveIn(X86::EFLAGS) && !TestMBB->pred_empty() &&
           !HasEFLAGSClobber(TestMBB->begin(), TestPos)) {
      // Find the nearest common dominator of the predecessors, as
      // that will be the best candidate to hoist into.
      MachineBasicBlock *HoistMBB =
          std::accumulate(std::next(TestMBB->pred_begin()), TestMBB->pred_end(),
                          *TestMBB->pred_begin(),
                          [&](MachineBasicBlock *LHS, MachineBasicBlock *RHS) {
                            return MDT->findNearestCommonDominator(LHS, RHS);
                          });

      // Now we need to scan all predecessors that may be reached along paths to
      // the hoist block. A clobber anywhere in any of these blocks prevents the
      // hoist. Note that this even handles loops because we require *no*
      // clobbers.
      if (HasEFLAGSClobberPath(HoistMBB, TestMBB))
        break;

      // We also need the terminators to not sneakily clobber flags.
      if (HasEFLAGSClobber(HoistMBB->getFirstTerminator()->getIterator(),
                           HoistMBB->instr_end()))
        break;

      // We found a viable location, hoist our test position to it.
      TestMBB = HoistMBB;
      TestPos = TestMBB->getFirstTerminator()->getIterator();
      // Clear the debug location as it would just be confusing after hoisting.
      TestLoc = DebugLoc();
    }
    LLVM_DEBUG({
      auto DefIt = llvm::find_if(
          llvm::reverse(llvm::make_range(TestMBB->instr_begin(), TestPos)),
          [&](MachineInstr &MI) {
            return MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
          });
      if (DefIt.base() != TestMBB->instr_begin()) {
        dbgs() << "  Using EFLAGS defined by: ";
        DefIt->dump();
      } else {
        dbgs() << "  Using live-in flags for BB:\n";
        TestMBB->dump();
      }
    });

    // While rewriting uses, we buffer jumps and rewrite them in a second pass
    // because doing so will perturb the CFG that we are walking to find the
    // uses in the first place.
    SmallVector<MachineInstr *, 4> JmpIs;

    // Gather the condition flags that have already been preserved in
    // registers. We do this from scratch each time as we expect there to be
    // very few of them and we expect to not revisit the same copy definition
    // many times. If either of those change sufficiently we could build a map
    // of these up front instead.
    CondRegArray CondRegs = collectCondsInRegs(*TestMBB, TestPos);

    // Collect the basic blocks we need to scan. Typically this will just be
    // a single basic block but we may have to scan multiple blocks if the
    // EFLAGS copy lives into successors.
    SmallVector<MachineBasicBlock *, 2> Blocks;
    SmallPtrSet<MachineBasicBlock *, 2> VisitedBlocks;
    Blocks.push_back(&MBB);

    do {
      MachineBasicBlock &UseMBB = *Blocks.pop_back_val();

      // Track if/when we find a kill of the flags in this block.
      bool FlagsKilled = false;

      // In most cases, we walk from the beginning to the end of the block. But
      // when the block is the same block as the copy is from, we will visit it
      // twice. The first time we start from the copy and go to the end. The
      // second time we start from the beginning and go to the copy. This lets
      // us handle copies inside of cycles.
      // FIXME: This loop is *super* confusing. This is at least in part
      // a symptom of all of this routine needing to be refactored into
      // documentable components. Once done, there may be a better way to write
      // this loop.
      for (auto MII = (&UseMBB == &MBB && !VisitedBlocks.count(&UseMBB))
                          ? std::next(CopyI->getIterator())
                          : UseMBB.instr_begin(),
                MIE = UseMBB.instr_end();
           MII != MIE;) {
        MachineInstr &MI = *MII++;
        // If we are in the original copy block and encounter either the copy
        // def or the copy itself, break so that we don't re-process any part of
        // the block or process the instructions in the range that was copied
        // over.
        if (&MI == CopyI || &MI == &CopyDefI) {
          assert(&UseMBB == &MBB && VisitedBlocks.count(&MBB) &&
                 "Should only encounter these on the second pass over the "
                 "original block.");
          break;
        }

        MachineOperand *FlagUse =
            MI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr);
        if (!FlagUse) {
          if (MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr)) {
            // If EFLAGS are defined, it's as-if they were killed. We can stop
            // scanning here.
            //
            // NB!!! Many instructions only modify some flags. LLVM currently
            // models this as clobbering all flags, but if that ever changes
            // this will need to be carefully updated to handle that more
            // complex logic.
            FlagsKilled = true;
            break;
          }
          continue;
        }

        LLVM_DEBUG(dbgs() << "  Rewriting use: "; MI.dump());

        // Check the kill flag before we rewrite as that may change it.
        if (FlagUse->isKill())
          FlagsKilled = true;

        // Once we encounter a branch, the rest of the instructions must also be
        // branches. We can't rewrite in place here, so we handle them below.
        //
        // Note that we don't have to handle tail calls here, even conditional
        // tail calls, as those are not introduced into the X86 MI until post-RA
        // branch folding or block placement. As a consequence, we get to deal
        // with the simpler formulation of conditional branches followed by tail
        // calls.
        if (X86::getCondFromBranch(MI) != X86::COND_INVALID) {
          auto JmpIt = MI.getIterator();
          do {
            JmpIs.push_back(&*JmpIt);
            ++JmpIt;
          } while (JmpIt != UseMBB.instr_end() &&
                   X86::getCondFromBranch(*JmpIt) != X86::COND_INVALID);
          break;
        }

        // Otherwise we can just rewrite in-place.
        if (X86::getCondFromCMov(MI) != X86::COND_INVALID ||
            X86::getCondFromCFCMov(MI) != X86::COND_INVALID) {
          rewriteCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
        } else if (getCondFromFCMOV(MI.getOpcode()) != X86::COND_INVALID) {
          rewriteFCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
        } else if (X86::getCondFromSETCC(MI) != X86::COND_INVALID) {
          rewriteSetCC(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
        } else if (MI.getOpcode() == TargetOpcode::COPY) {
          rewriteCopy(MI, *FlagUse, CopyDefI);
        } else {
          // We assume all other instructions that use flags also def them.
          assert(MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr) &&
                 "Expected a def of EFLAGS for this instruction!");

          // NB!!! Several arithmetic instructions only *partially* update
          // flags. Theoretically, we could generate MI code sequences that
          // would rely on this fact and observe different flags independently.
          // But currently LLVM models all of these instructions as clobbering
          // all the flags in an undef way. We rely on that to simplify the
          // logic.
          FlagsKilled = true;

          // Generically handle remaining uses as arithmetic instructions.
          rewriteArithmetic(*TestMBB, TestPos, TestLoc, MI, *FlagUse,
                            CondRegs);
        }

        // If this was the last use of the flags, we're done.
        if (FlagsKilled)
          break;
      }

      // If the flags were killed, we're done with this block.
      if (FlagsKilled)
        continue;

      // Otherwise we need to scan successors for ones where the flags live-in
      // and queue those up for processing.
      for (MachineBasicBlock *SuccMBB : UseMBB.successors())
        if (SuccMBB->isLiveIn(X86::EFLAGS) &&
            VisitedBlocks.insert(SuccMBB).second) {
          // We currently don't do any PHI insertion and so we require that the
          // test basic block dominates all of the use basic blocks. Further, we
          // can't have a cycle from the test block back to itself as that would
          // create a cycle requiring a PHI to break it.
          //
          // We could in theory do PHI insertion here if it becomes useful by
          // just taking undef values in along every edge that we don't trace
          // this EFLAGS copy along. This isn't as bad as fully general PHI
          // insertion, but still seems like a great deal of complexity.
          //
          // Because it is theoretically possible that some earlier MI pass or
          // other lowering transformation could induce this to happen, we do
          // a hard check even in non-debug builds here.
          if (SuccMBB == TestMBB || !MDT->dominates(TestMBB, SuccMBB)) {
            LLVM_DEBUG({
              dbgs()
                  << "ERROR: Encountered use that is not dominated by our test "
                     "basic block! Rewriting this would require inserting PHI "
                     "nodes to track the flag state across the CFG.\n\nTest "
                     "block:\n";
              TestMBB->dump();
              dbgs() << "Use block:\n";
              SuccMBB->dump();
            });
            report_fatal_error(
                "Cannot lower EFLAGS copy when original copy def "
                "does not dominate all uses.");
          }

          Blocks.push_back(SuccMBB);

          // After this, EFLAGS will be recreated before each use.
          SuccMBB->removeLiveIn(X86::EFLAGS);
        }
    } while (!Blocks.empty());

    // Now rewrite the jumps that use the flags. These we handle specially
    // because if there are multiple jumps in a single basic block we'll have
    // to do surgery on the CFG.
    MachineBasicBlock *LastJmpMBB = nullptr;
    for (MachineInstr *JmpI : JmpIs) {
      // Past the first jump within a basic block we need to split the blocks
      // apart.
      if (JmpI->getParent() == LastJmpMBB)
        splitBlock(*JmpI->getParent(), *JmpI, *TII);
      else
        LastJmpMBB = JmpI->getParent();

      rewriteCondJmp(*TestMBB, TestPos, TestLoc, *JmpI, CondRegs);
    }

    // FIXME: Mark the last use of EFLAGS before the copy's def as a kill if
    // the copy's def operand is itself a kill.
  }

#ifndef NDEBUG
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          (MI.getOperand(0).getReg() == X86::EFLAGS ||
           MI.getOperand(1).getReg() == X86::EFLAGS)) {
        LLVM_DEBUG(dbgs() << "ERROR: Found a COPY involving EFLAGS: ";
                   MI.dump());
        llvm_unreachable("Unlowered EFLAGS copy!");
      }
#endif

  return true;
}

/// Collect any conditions that have already been set in registers so that we
/// can re-use them rather than adding duplicates.
CondRegArray X86FlagsCopyLoweringPass::collectCondsInRegs(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator TestPos) {
  CondRegArray CondRegs = {};

  // Scan backwards across the range of instructions with live EFLAGS.
  for (MachineInstr &MI :
       llvm::reverse(llvm::make_range(MBB.begin(), TestPos))) {
    X86::CondCode Cond = X86::getCondFromSETCC(MI);
    if (Cond != X86::COND_INVALID && !MI.mayStore() &&
        MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual()) {
      assert(MI.getOperand(0).isDef() &&
             "A non-storing SETcc should always define a register!");
      CondRegs[Cond] = MI.getOperand(0).getReg();
    }

    // Stop scanning when we see the first definition of the EFLAGS as prior to
    // this we would potentially capture the wrong flag state.
    if (MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr))
      break;
  }
  return CondRegs;
}

Register X86FlagsCopyLoweringPass::promoteCondToReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, X86::CondCode Cond) {
  Register Reg = MRI->createVirtualRegister(PromoteRC);
  auto SetI = BuildMI(TestMBB, TestPos, TestLoc,
                      TII->get(X86::SETCCr), Reg).addImm(Cond);
  (void)SetI;
  LLVM_DEBUG(dbgs() << "    save cond: "; SetI->dump());
  ++NumSetCCsInserted;
  return Reg;
}

std::pair<unsigned, bool> X86FlagsCopyLoweringPass::getCondOrInverseInReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs) {
  unsigned &CondReg = CondRegs[Cond];
  unsigned &InvCondReg = CondRegs[X86::GetOppositeBranchCondition(Cond)];
  if (!CondReg && !InvCondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  if (CondReg)
    return {CondReg, false};
  else
    return {InvCondReg, true};
}

void X86FlagsCopyLoweringPass::insertTest(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator Pos,
                                          const DebugLoc &Loc, unsigned Reg) {
  auto TestI =
      BuildMI(MBB, Pos, Loc, TII->get(X86::TEST8rr)).addReg(Reg).addReg(Reg);
  (void)TestI;
  LLVM_DEBUG(dbgs() << "    test cond: "; TestI->dump());
  ++NumTestsInserted;
}

void X86FlagsCopyLoweringPass::rewriteArithmetic(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, MachineInstr &MI, MachineOperand &FlagUse,
    CondRegArray &CondRegs) {
  // Arithmetic is either reading CF or OF. Figure out which condition we need
  // to preserve in a register.
  X86::CondCode Cond = X86::COND_INVALID;

  // The addend to use to reset CF or OF when added to the flag value.
  int Addend = 0;

  switch (getMnemonicFromOpcode(MI.getOpcode())) {
  case FlagArithMnemonic::ADC:
  case FlagArithMnemonic::RCL:
  case FlagArithMnemonic::RCR:
  case FlagArithMnemonic::SBB:
  case FlagArithMnemonic::SETB:
    Cond = X86::COND_B; // CF == 1
    // Set up an addend that when one is added will need a carry due to not
    // having a higher bit available.
    Addend = 255;
    break;
  }
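
  // Editorial worked example (not in the original source): CondReg holds the
  // saved CF as a 0/1 byte, and `ADD8ri CondReg, 255` reconstructs CF:
  //   CondReg == 1  =>  1 + 255 == 256 overflows 8 bits, so CF is set;
  //   CondReg == 0  =>  0 + 255 == 255 fits in 8 bits, so CF stays clear.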

  // Now get a register that contains the value of the flag input to the
  // arithmetic. We require exactly this flag to simplify the arithmetic
  // required to materialize it back into the flag.
  unsigned &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  MachineBasicBlock &MBB = *MI.getParent();

  // Insert an instruction that will set the flag back to the desired value.
  Register TmpReg = MRI->createVirtualRegister(PromoteRC);
  auto AddI =
      BuildMI(MBB, MI.getIterator(), MI.getDebugLoc(),
              TII->get(Subtarget->hasNDD() ? X86::ADD8ri_ND : X86::ADD8ri))
          .addDef(TmpReg, RegState::Dead)
          .addReg(CondReg)
          .addImm(Addend);
  (void)AddI;
  LLVM_DEBUG(dbgs() << "    add cond: "; AddI->dump());
  ++NumAddsInserted;
  FlagUse.setIsKill(true);
}

void X86FlagsCopyLoweringPass::rewriteCMov(MachineBasicBlock &TestMBB,
                                           MachineBasicBlock::iterator TestPos,
                                           const DebugLoc &TestLoc,
                                           MachineInstr &CMovI,
                                           MachineOperand &FlagUse,
                                           CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  X86::CondCode Cond = X86::getCondFromCMov(CMovI) == X86::COND_INVALID
                           ? X86::getCondFromCFCMov(CMovI)
                           : X86::getCondFromCMov(CMovI);
  unsigned CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);

  MachineBasicBlock &MBB = *CMovI.getParent();

  // Insert a direct test of the saved register.
  insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);

  // Rewrite the CMov to use the !ZF flag from the test, and then kill its use
  // of the flags afterward.
  CMovI.getOperand(CMovI.getDesc().getNumOperands() - 1)
      .setImm(Inverted ? X86::COND_E : X86::COND_NE);
  FlagUse.setIsKill(true);
  LLVM_DEBUG(dbgs() << "    fixed cmov: "; CMovI.dump());
}

void X86FlagsCopyLoweringPass::rewriteFCMov(MachineBasicBlock &TestMBB,
                                            MachineBasicBlock::iterator TestPos,
                                            const DebugLoc &TestLoc,
                                            MachineInstr &CMovI,
                                            MachineOperand &FlagUse,
                                            CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  X86::CondCode Cond = getCondFromFCMOV(CMovI.getOpcode());
  unsigned CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);

  MachineBasicBlock &MBB = *CMovI.getParent();

  // Insert a direct test of the saved register.
  insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);

  auto getFCMOVOpcode = [](unsigned Opcode, bool Inverted) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode!");
    case X86::CMOVBE_Fp32: case X86::CMOVNBE_Fp32:
    case X86::CMOVB_Fp32: case X86::CMOVNB_Fp32:
    case X86::CMOVE_Fp32: case X86::CMOVNE_Fp32:
    case X86::CMOVP_Fp32: case X86::CMOVNP_Fp32:
      return Inverted ? X86::CMOVE_Fp32 : X86::CMOVNE_Fp32;
    case X86::CMOVBE_Fp64: case X86::CMOVNBE_Fp64:
    case X86::CMOVB_Fp64: case X86::CMOVNB_Fp64:
    case X86::CMOVE_Fp64: case X86::CMOVNE_Fp64:
    case X86::CMOVP_Fp64: case X86::CMOVNP_Fp64:
      return Inverted ? X86::CMOVE_Fp64 : X86::CMOVNE_Fp64;
    case X86::CMOVBE_Fp80: case X86::CMOVNBE_Fp80:
    case X86::CMOVB_Fp80: case X86::CMOVNB_Fp80:
    case X86::CMOVE_Fp80: case X86::CMOVNE_Fp80:
    case X86::CMOVP_Fp80: case X86::CMOVNP_Fp80:
      return Inverted ? X86::CMOVE_Fp80 : X86::CMOVNE_Fp80;
    }
  };

  // Rewrite the CMov to use the !ZF flag from the test.
  CMovI.setDesc(TII->get(getFCMOVOpcode(CMovI.getOpcode(), Inverted)));
  FlagUse.setIsKill(true);
  LLVM_DEBUG(dbgs() << "    fixed fcmov: "; CMovI.dump());
}

void X86FlagsCopyLoweringPass::rewriteCondJmp(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    const DebugLoc &TestLoc, MachineInstr &JmpI, CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  X86::CondCode Cond = X86::getCondFromBranch(JmpI);
  unsigned CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);

  MachineBasicBlock &JmpMBB = *JmpI.getParent();

  // Insert a direct test of the saved register.
  insertTest(JmpMBB, JmpI.getIterator(), JmpI.getDebugLoc(), CondReg);

  // Rewrite the jump to use the !ZF flag from the test, and kill its use of
  // flags afterward.
  JmpI.getOperand(1).setImm(Inverted ? X86::COND_E : X86::COND_NE);
  JmpI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true);
  LLVM_DEBUG(dbgs() << "    fixed jCC: "; JmpI.dump());
}

void X86FlagsCopyLoweringPass::rewriteCopy(MachineInstr &MI,
                                           MachineOperand &FlagUse,
                                           MachineInstr &CopyDefI) {
  // Just replace this copy with the original copy def.
  MRI->replaceRegWith(MI.getOperand(0).getReg(),
                      CopyDefI.getOperand(0).getReg());
  MI.eraseFromParent();
}

void X86FlagsCopyLoweringPass::rewriteSetCC(MachineBasicBlock &TestMBB,
                                            MachineBasicBlock::iterator TestPos,
                                            const DebugLoc &TestLoc,
                                            MachineInstr &SetCCI,
                                            MachineOperand &FlagUse,
                                            CondRegArray &CondRegs) {
  X86::CondCode Cond = X86::getCondFromSETCC(SetCCI);
  // Note that we can't usefully rewrite this to the inverse without complex
  // analysis of the users of the setCC. Largely we rely on duplicates which
  // could have been avoided already being avoided here.
  unsigned &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  // Rewriting a register def is trivial: we just replace the register and
  // remove the setcc.
  if (!SetCCI.mayStore()) {
    assert(SetCCI.getOperand(0).isReg() &&
           "Cannot have a non-register defined operand to SETcc!");
    Register OldReg = SetCCI.getOperand(0).getReg();
    // Drop Kill flags on the old register before replacing. CondReg may have
    // a longer live range.
    MRI->clearKillFlags(OldReg);
    MRI->replaceRegWith(OldReg, CondReg);
    SetCCI.eraseFromParent();
    return;
  }

  // Otherwise, we need to emit a store.
  auto MIB = BuildMI(*SetCCI.getParent(), SetCCI.getIterator(),
                     SetCCI.getDebugLoc(), TII->get(X86::MOV8mr));
  // Copy the address operands.
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.add(SetCCI.getOperand(i));

  MIB.addReg(CondReg);

  MIB.setMemRefs(SetCCI.memoperands());

  SetCCI.eraseFromParent();
}