IRTranslator.h
1//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file declares the IRTranslator pass.
10/// This pass is responsible for translating LLVM IR into MachineInstr.
11/// It uses target hooks to lower the ABI, but aside from that, the code
12/// generated by the pass is generic. This is the default translator used for
13/// GlobalISel.
14///
15/// \todo Replace the comments with actual doxygen comments.
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
19#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
20
21#include "llvm/ADT/DenseMap.h"
31#include <memory>
32#include <utility>
33
34namespace llvm {
35
36class AllocaInst;
37class AssumptionCache;
38class BasicBlock;
39class CallInst;
40class CallLowering;
41class Constant;
42class ConstrainedFPIntrinsic;
43class DataLayout;
44class Instruction;
45class MachineBasicBlock;
46class MachineFunction;
47class MachineInstr;
48class MachineRegisterInfo;
49class OptimizationRemarkEmitter;
50class PHINode;
51class TargetLibraryInfo;
52class TargetPassConfig;
53class User;
54class Value;
55
56// Technically the pass should run on a hypothetical MachineModule,
57// since it should translate Global into some sort of MachineGlobal.
58// The MachineGlobal should ultimately just be a transfer of ownership of
59// the interesting bits that are relevant to represent a global value.
60// That being said, we could investigate what it would cost to just duplicate
61// the information from the LLVM IR.
62// The idea is that ultimately we would be able to free up the memory used
63// by the LLVM IR as soon as the translation is over.
64class IRTranslator : public MachineFunctionPass {
65public:
66 static char ID;
67
68private:
69 /// Interface used to lower everything related to calls.
70 const CallLowering *CLI;
71
72 /// This class contains the mapping from Values to the related vreg data.
73 class ValueToVRegInfo {
74 public:
75 ValueToVRegInfo() = default;
76
77 using VRegListT = SmallVector<Register, 1>;
78 using OffsetListT = SmallVector<uint64_t, 1>;
79
80 using const_vreg_iterator =
81     DenseMap<const Value *, VRegListT *>::const_iterator;
82 using const_offset_iterator =
83     DenseMap<const Type *, OffsetListT *>::const_iterator;
84
85 inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }
86
87 VRegListT *getVRegs(const Value &V) {
88 auto It = ValToVRegs.find(&V);
89 if (It != ValToVRegs.end())
90 return It->second;
91
92 return insertVRegs(V);
93 }
94
95 OffsetListT *getOffsets(const Value &V) {
96 auto It = TypeToOffsets.find(V.getType());
97 if (It != TypeToOffsets.end())
98 return It->second;
99
100 return insertOffsets(V);
101 }
102
103 const_vreg_iterator findVRegs(const Value &V) const {
104 return ValToVRegs.find(&V);
105 }
106
107 bool contains(const Value &V) const { return ValToVRegs.contains(&V); }
108
109 void reset() {
110 ValToVRegs.clear();
111 TypeToOffsets.clear();
112 VRegAlloc.DestroyAll();
113 OffsetAlloc.DestroyAll();
114 }
115
116 private:
117 VRegListT *insertVRegs(const Value &V) {
118 assert(!ValToVRegs.contains(&V) && "Value already exists");
119
120 // We placement new using our fast allocator since we never try to free
121 // the vectors until translation is finished.
122 auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
123 ValToVRegs[&V] = VRegList;
124 return VRegList;
125 }
126
127 OffsetListT *insertOffsets(const Value &V) {
128 assert(!TypeToOffsets.contains(V.getType()) && "Type already exists");
129
130 auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
131 TypeToOffsets[V.getType()] = OffsetList;
132 return OffsetList;
133 }
134 SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
135 SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;
136
137 // We store pointers to vectors here since references may be invalidated
138 // while we hold them if we stored the vectors directly.
139 DenseMap<const Value *, VRegListT *> ValToVRegs;
140 DenseMap<const Type *, OffsetListT *> TypeToOffsets;
141 };
142
143 /// Mapping of the values of the current LLVM IR function to the related
144 /// virtual registers and offsets.
145 ValueToVRegInfo VMap;
146
147 // N.b. it's not completely obvious that this will be sufficient for every
148 // LLVM IR construct (with "invoke" being the obvious candidate to mess up our
149 // lives).
150 DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;
151
152 // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
153 // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
154 // a mapping from the edges arriving at the BasicBlock to the corresponding
155 // created MachineBasicBlocks. Some BasicBlocks that get translated to a
156 // single MachineBasicBlock may also end up in this Map.
157 using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
158 DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;
159
160 // List of stubbed PHI instructions, for values and basic blocks to be filled
161 // in once all MachineBasicBlocks have been created.
162 SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
163     PendingPHIs;
164
165 /// Record of which frame index has been allocated to each alloca of
166 /// this function.
167 DenseMap<const AllocaInst *, int> FrameIndices;
168
169 SwiftErrorValueTracking SwiftError;
170
171 /// \name Methods for translating from LLVM IR to MachineInstr.
172 /// \see ::translate for general information on the translate methods.
173 /// @{
174
175 /// Translate \p Inst into its corresponding MachineInstr(s).
176 /// Insert the newly translated instruction(s) right where the CurBuilder
177 /// is set.
178 ///
179 /// The general algorithm is:
180 /// 1. Look for a virtual register for each operand or
181 /// create one.
182 /// 2. Update the VMap accordingly.
183 /// 2.alt. For constant arguments, if they are compile-time constants,
184 /// produce an immediate in the right operand and do not touch
185 /// ValToReg. Actually we will go with a virtual register for each
186 /// constant because it may be expensive to actually materialize the
187 /// constant. Moreover, if the constant spans several instructions,
188 /// CSE may not catch them.
189 /// => Update ValToVReg and remember that we saw a constant in Constants.
190 /// We will materialize all the constants in finalize.
191 /// Note: we would need to do something so that we can recognize such operand
192 /// as constants.
193 /// 3. Create the generic instruction.
194 ///
195 /// \return true if the translation succeeded.
196 bool translate(const Instruction &Inst);
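 // For illustration: an IR instruction such as
 //   %sum = add i32 %a, %b
 // is translated into a generic MachineInstr along the lines of
 //   %2:_(s32) = G_ADD %0:_(s32), %1:_(s32)
 // (the virtual register numbers here are arbitrary), where %0 and %1 are the
 // registers recorded for %a and %b in VMap.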
197
198 /// Materialize \p C into virtual-register \p Reg. The generic instructions
199 /// performing this materialization will be inserted into the entry block of
200 /// the function.
201 ///
202 /// \return true if the materialization succeeded.
203 bool translate(const Constant &C, Register Reg);
204
205 // Translate U as a copy of V.
206 bool translateCopy(const User &U, const Value &V,
207 MachineIRBuilder &MIRBuilder);
208
209 /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
210 /// emitted.
211 bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
212
213 /// Translate an LLVM load instruction into generic IR.
214 bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
215
216 /// Translate an LLVM store instruction into generic IR.
217 bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
218
219 /// Translate an LLVM string intrinsic (memcpy, memset, ...).
220 bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
221 unsigned Opcode);
222
223 void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
224
225 bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
226 MachineIRBuilder &MIRBuilder);
227 bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
228 MachineIRBuilder &MIRBuilder);
229
230 /// Helper function for translateSimpleIntrinsic.
231 /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
232 /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
233 /// Intrinsic::not_intrinsic.
234 unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);
235
236 /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
237 /// \return true if the translation succeeded.
238 bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
239 MachineIRBuilder &MIRBuilder);
240
241 bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
242 MachineIRBuilder &MIRBuilder);
243
244 bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
245 MachineIRBuilder &MIRBuilder);
246
247 bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);
248
249 /// Common code for translating normal calls or invokes.
250 bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);
251
252 /// Translate call instruction.
253 /// \pre \p U is a call instruction.
254 bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
255
256 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
257 /// many places it could ultimately go. In the IR, we have a single unwind
258 /// destination, but in the machine CFG, we enumerate all the possible blocks.
259 /// This function skips over imaginary basic blocks that hold catchswitch
260 /// instructions, and finds all the "real" machine
261 /// basic block destinations. As those destinations may not be successors of
262 /// EHPadBB, here we also calculate the edge probability to those
263 /// destinations. The passed-in Prob is the edge probability to EHPadBB.
264 bool findUnwindDestinations(
265 const BasicBlock *EHPadBB, BranchProbability Prob,
266 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
267 &UnwindDests);
268
269 bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
270
271 bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
272
273 bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
274
275 /// Translate one of LLVM's cast instructions into MachineInstrs, with the
276 /// given generic Opcode.
277 bool translateCast(unsigned Opcode, const User &U,
278 MachineIRBuilder &MIRBuilder);
279
280 /// Translate a phi instruction.
281 bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
282
283 /// Translate a comparison (icmp or fcmp) instruction or constant.
284 bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
285
286 /// Translate an integer compare instruction (or constant).
287 bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
288 return translateCompare(U, MIRBuilder);
289 }
290
291 /// Translate a floating-point compare instruction (or constant).
292 bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
293 return translateCompare(U, MIRBuilder);
294 }
295
296 /// Add remaining operands onto phis we've translated. Executed after all
297 /// MachineBasicBlocks for the function have been created.
298 void finishPendingPhis();
299
300 /// Translate \p U into a unary operation \p Opcode.
301 /// \pre \p U is a unary operation.
302 bool translateUnaryOp(unsigned Opcode, const User &U,
303 MachineIRBuilder &MIRBuilder);
304
305 /// Translate \p U into a binary operation \p Opcode.
306 /// \pre \p U is a binary operation.
307 bool translateBinaryOp(unsigned Opcode, const User &U,
308 MachineIRBuilder &MIRBuilder);
309
310 /// If the set of cases should be emitted as a series of branches, return
311 /// true. If we should emit this as a bunch of and/or'd together conditions,
312 /// return false.
313 bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
314 /// Helper method for findMergedConditions.
315 /// This function emits a branch and is used at the leaves of an OR or an
316 /// AND operator tree.
317 void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
318 MachineBasicBlock *FBB,
319 MachineBasicBlock *CurBB,
320 MachineBasicBlock *SwitchBB,
321 BranchProbability TProb,
322 BranchProbability FProb, bool InvertCond);
323 /// Used during condbr translation to find trees of conditions that can be
324 /// optimized.
325 void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
326 MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
327 MachineBasicBlock *SwitchBB,
328 Instruction::BinaryOps Opc, BranchProbability TProb,
329 BranchProbability FProb, bool InvertCond);
330
331 /// Translate branch (br) instruction.
332 /// \pre \p U is a branch instruction.
333 bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
334
335 // Begin switch lowering functions.
336 bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
337 SwitchCG::JumpTableHeader &JTH,
338 MachineBasicBlock *HeaderBB);
339 void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);
340
341 void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
342 MachineIRBuilder &MIB);
343
344 /// Generate code for the BitTest header block, which precedes each sequence of
345 /// BitTestCases.
346 void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
347 MachineBasicBlock *SwitchMBB);
348 /// Generate code to produce one "bit test" for a given BitTestCase \p B.
349 void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
350 BranchProbability BranchProbToNext, Register Reg,
351 SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
352
353 bool lowerJumpTableWorkItem(
354 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
355 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
356 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
357 BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
358 MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
359
360 bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
361 MachineBasicBlock *Fallthrough,
362 bool FallthroughUnreachable,
363 BranchProbability UnhandledProbs,
364 MachineBasicBlock *CurMBB,
365 MachineIRBuilder &MIB,
366 MachineBasicBlock *SwitchMBB);
367
368 bool lowerBitTestWorkItem(
369 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
370 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
371 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
372 BranchProbability DefaultProb, BranchProbability UnhandledProbs,
373 SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
374 bool FallthroughUnreachable);
375
376 bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
377 MachineBasicBlock *SwitchMBB,
378 MachineBasicBlock *DefaultMBB,
379 MachineIRBuilder &MIB);
380
381 bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
382 // End switch lowering section.
383
384 bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
385
386 bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
387
388 bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
389
390 bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
391
392 bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
393
394 bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);
395
396 /// Translate return (ret) instruction.
397 /// The target needs to implement CallLowering::lowerReturn for
398 /// this to succeed.
399 /// \pre \p U is a return instruction.
400 bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
401
402 bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);
403
404 bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
405 return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
406 }
407 bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
408 return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
409 }
410 bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
411 return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
412 }
413 bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
414 return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
415 }
416 bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
417 return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
418 }
419 bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
420 return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
421 }
422
423 bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
424 return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
425 }
426 bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
427 return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
428 }
429 bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
430 return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
431 }
432 bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
433 return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
434 }
435 bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
436 return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
437 }
438 bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
439 return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
440 }
441 bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
442 return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
443 }
444 bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
445 return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
446 }
447 bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
448 return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
449 }
450 bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
451 return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
452 }
453 bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
454 return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
455 }
456 bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
457 return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
458 }
459 bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
460 return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
461 }
462 bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder);
463
464 bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
465 return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
466 }
467
468 bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
469 return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
470 }
471
472 bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
473 return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
474 }
475 bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
476 return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
477 }
478 bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
479 return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
480 }
481
482 bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
483 return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
484 }
485 bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
486 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
487 }
488 bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
489 return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
490 }
491 bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
492 return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
493 }
494 bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
495 return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
496 }
497
498 bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);
499
500 bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
501
502 bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
503
504 bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
505
506 bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
507 bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
508 bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
509 bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);
510
511 // Stubs to keep the compiler happy while we implement the rest of the
512 // translation.
513 bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
514 return false;
515 }
516 bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
517 return false;
518 }
519 bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
520 return false;
521 }
522 bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
523 return false;
524 }
525 bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
526 return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
527 }
528 bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
529 return false;
530 }
531 bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
532 return false;
533 }
534 bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
535 return false;
536 }
537 bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
538 return false;
539 }
540
541 /// @}
542
543 // Builder for machine instructions, a la IRBuilder.
544 // I.e., compared to the regular MIBuilder, this one also inserts the instruction
545 // in the current block, can create blocks, etc.; basically a kind of
546 // IRBuilder, but for Machine IR.
547 // CSEMIRBuilder CurBuilder;
548 std::unique_ptr<MachineIRBuilder> CurBuilder;
549
550 // Builder set to the entry block (just after ABI lowering instructions). Used
551 // as a convenient location for Constants.
552 // CSEMIRBuilder EntryBuilder;
553 std::unique_ptr<MachineIRBuilder> EntryBuilder;
554
555 // The MachineFunction currently being translated.
556 MachineFunction *MF;
557
558 /// MachineRegisterInfo used to create virtual registers.
559 MachineRegisterInfo *MRI = nullptr;
560
561 const DataLayout *DL;
562
563 /// Current target configuration. Controls how the pass handles errors.
564 const TargetPassConfig *TPC;
565
566 CodeGenOpt::Level OptLevel;
567
568 /// Current optimization remark emitter. Used to report failures.
569 std::unique_ptr<OptimizationRemarkEmitter> ORE;
570
571 AAResults *AA;
572 AssumptionCache *AC;
573 const TargetLibraryInfo *LibInfo;
574 FunctionLoweringInfo FuncInfo;
575
576 // True when the Target Machine's optimization level is not None and the
577 // function does not have the optnone attribute.
578 bool EnableOpts = false;
579
580 /// True when the block contains a tail call. This allows the IRTranslator to
581 /// stop translating such blocks early.
582 bool HasTailCall = false;
583
584 StackProtectorDescriptor SPDescriptor;
585
586 /// Switch analysis and optimization.
587 class GISelSwitchLowering : public SwitchCG::SwitchLowering {
588 public:
589 GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
590 : SwitchLowering(funcinfo), IRT(irt) {
591 assert(irt && "irt is null!");
592 }
593
594 void addSuccessorWithProb(
595 MachineBasicBlock *Src, MachineBasicBlock *Dst,
596 BranchProbability Prob = BranchProbability::getUnknown()) override {
597 IRT->addSuccessorWithProb(Src, Dst, Prob);
598 }
599
600 virtual ~GISelSwitchLowering() = default;
601
602 private:
603 IRTranslator *IRT;
604 };
605
606 std::unique_ptr<GISelSwitchLowering> SL;
607
608 // * Insert all the code needed to materialize the constants
609 // at the proper place. E.g., Entry block or dominator block
610 // of each constant depending on how fancy we want to be.
611 // * Clear the different maps.
612 void finalizeFunction();
613
614 // Processing steps done per block. E.g. emitting jump tables, stack
615 // protectors etc. Returns true if no errors, false if there was a problem
616 // that caused an abort.
617 bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);
618
619 /// Codegen a new tail for a stack protector check ParentMBB which has had its
620 /// tail spliced into a stack protector check success bb.
621 ///
622 /// For a high level explanation of how this fits into the stack protector
623 /// generation see the comment on the declaration of class
624 /// StackProtectorDescriptor.
625 ///
626 /// \return true if there were no problems.
627 bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
628 MachineBasicBlock *ParentBB);
629
630 /// Codegen the failure basic block for a stack protector check.
631 ///
632 /// A failure stack protector machine basic block consists simply of a call to
633 /// __stack_chk_fail().
634 ///
635 /// For a high level explanation of how this fits into the stack protector
636 /// generation see the comment on the declaration of class
637 /// StackProtectorDescriptor.
638 ///
639 /// \return true if there were no problems.
640 bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
641 MachineBasicBlock *FailureBB);
642
643 /// Get the VRegs that represent \p Val.
644 /// Non-aggregate types have just one corresponding VReg and the list can be
645 /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
646 /// not exist, they are created.
647 ArrayRef<Register> getOrCreateVRegs(const Value &Val);
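 // For example (illustrative): a value of aggregate type {i32, i64} yields two
 // virtual registers (one s32, one s64), while a plain i32 value yields a
 // single s32 register.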
648
649 Register getOrCreateVReg(const Value &Val) {
650 auto Regs = getOrCreateVRegs(Val);
651 if (Regs.empty())
652 return 0;
653 assert(Regs.size() == 1 &&
654 "attempt to get single VReg for aggregate or void");
655 return Regs[0];
656 }
657
658 /// Allocate some vregs and offsets in the VMap. Then populate just the
659 /// offsets while leaving the vregs empty.
660 ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
661
662 /// Get the frame index that represents \p AI.
663 /// If no such frame index exists, it is created.
664 int getOrCreateFrameIndex(const AllocaInst &AI);
665
666 /// Get the alignment of the given memory operation instruction. This will
667 /// either be the explicitly specified value or the ABI-required alignment for
668 /// the type being accessed (according to the Module's DataLayout).
669 Align getMemOpAlign(const Instruction &I);
670
671 /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
672 /// returned will be the head of the translated block (suitable for branch
673 /// destinations).
674 MachineBasicBlock &getMBB(const BasicBlock &BB);
675
676 /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
677 /// to `Edge.first` at the IR level. This is used when IRTranslation creates
678 /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
679 /// represented simply by the IR-level CFG.
680 void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);
681
682 /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
683 /// this is just the single MachineBasicBlock corresponding to the predecessor
684 /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
685 /// preceding the original though (e.g. switch instructions).
686 SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
687 auto RemappedEdge = MachinePreds.find(Edge);
688 if (RemappedEdge != MachinePreds.end())
689 return RemappedEdge->second;
690 return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
691 }
692
693 /// Return branch probability calculated by BranchProbabilityInfo for IR
694 /// blocks.
695 BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
696 const MachineBasicBlock *Dst) const;
697
698 void addSuccessorWithProb(
699 MachineBasicBlock *Src, MachineBasicBlock *Dst,
700 BranchProbability Prob = BranchProbability::getUnknown());
701
702public:
703 IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);
704
705 StringRef getPassName() const override { return "IRTranslator"; }
706
707 void getAnalysisUsage(AnalysisUsage &AU) const override;
708
709 // Algo:
710 // CallLowering = MF.subtarget.getCallLowering()
711 // F = MF.getParent()
712 // MIRBuilder.reset(MF)
713 // getMBB(F.getEntryBB())
714 // CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
715 // for each bb in F
716 // getMBB(bb)
717 // for each inst in bb
718 // if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
719 // report_fatal_error("Don't know how to translate input");
720 // finalize()
721 bool runOnMachineFunction(MachineFunction &MF) override;
722};
723
724} // end namespace llvm
725
726#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
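
As a usage illustration, the minimal sketch below shows how a backend schedules this pass from its TargetPassConfig subclass as the first step of the GlobalISel pipeline. MyTargetPassConfig is a hypothetical name; the addIRTranslator()/addPass() hooks are the ones in-tree targets such as AArch64 use for this purpose.

// Minimal sketch: scheduling IRTranslator from a (hypothetical) target pass config.
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/TargetPassConfig.h"

namespace {
class MyTargetPassConfig : public llvm::TargetPassConfig {
public:
  MyTargetPassConfig(llvm::LLVMTargetMachine &TM, llvm::PassManagerBase &PM)
      : llvm::TargetPassConfig(TM, PM) {}

  // First GlobalISel step: translate LLVM IR into generic MachineInstrs.
  bool addIRTranslator() override {
    addPass(new llvm::IRTranslator(getOptLevel()));
    return false; // No error; Legalizer, RegBankSelect, InstructionSelect follow.
  }
};
} // end anonymous namespace

The generic MIR produced by the pass can then be inspected with, for example, llc -global-isel -stop-after=irtranslator.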