LLVM 23.0.0git
IRTranslator.h
Go to the documentation of this file.
1//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file declares the IRTranslator pass.
10/// This pass is responsible for translating LLVM IR into MachineInstr.
11/// It uses target hooks to lower the ABI but aside from that, the pass
12/// generated code is generic. This is the default translator used for
13/// GlobalISel.
14///
15/// \todo Replace the comments with actual doxygen comments.
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
19#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
20
21#include "llvm/ADT/DenseMap.h"
31#include <memory>
32#include <utility>
33
34namespace llvm {
35
36class AllocaInst;
37class AssumptionCache;
38class BasicBlock;
39class CallInst;
40class CallLowering;
41class Constant;
43class DataLayout;
44class DbgDeclareInst;
45class DbgValueInst;
46class Instruction;
48class MachineFunction;
49class MachineInstr;
52class PHINode;
55class User;
56class Value;
57
58// Technically the pass should run on an hypothetical MachineModule,
59// since it should translate Global into some sort of MachineGlobal.
60// The MachineGlobal should ultimately just be a transfer of ownership of
61// the interesting bits that are relevant to represent a global value.
62// That being said, we could investigate what would it cost to just duplicate
63// the information from the LLVM IR.
64// The idea is that ultimately we would be able to free up the memory used
65// by the LLVM IR as soon as the translation is over.
67public:
68 static char ID;
69
70private:
 71 /// Interface used to lower everything related to calls.
72 const CallLowering *CLI = nullptr;
73
74 /// This class contains the mapping between the Values to vreg related data.
75 class ValueToVRegInfo {
76 public:
77 ValueToVRegInfo() = default;
78
79 using VRegListT = SmallVector<Register, 1>;
80 using OffsetListT = SmallVector<uint64_t, 1>;
81
82 using const_vreg_iterator =
84 using const_offset_iterator =
86
87 inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }
88
89 VRegListT *getVRegs(const Value &V) {
90 auto It = ValToVRegs.find(&V);
91 if (It != ValToVRegs.end())
92 return It->second;
93
94 return insertVRegs(V);
95 }
96
97 OffsetListT *getOffsets(const Value &V) {
98 auto It = TypeToOffsets.find(V.getType());
99 if (It != TypeToOffsets.end())
100 return It->second;
101
102 return insertOffsets(V);
103 }
104
105 const_vreg_iterator findVRegs(const Value &V) const {
106 return ValToVRegs.find(&V);
107 }
108
109 bool contains(const Value &V) const { return ValToVRegs.contains(&V); }
110
111 void reset() {
112 ValToVRegs.clear();
113 TypeToOffsets.clear();
114 VRegAlloc.DestroyAll();
115 OffsetAlloc.DestroyAll();
116 }
117
118 private:
119 VRegListT *insertVRegs(const Value &V) {
120 assert(!ValToVRegs.contains(&V) && "Value already exists");
121
122 // We placement new using our fast allocator since we never try to free
123 // the vectors until translation is finished.
124 auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
125 ValToVRegs[&V] = VRegList;
126 return VRegList;
127 }
128
129 OffsetListT *insertOffsets(const Value &V) {
130 assert(!TypeToOffsets.contains(V.getType()) && "Type already exists");
131
132 auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
133 TypeToOffsets[V.getType()] = OffsetList;
134 return OffsetList;
135 }
138
139 // We store pointers to vectors here since references may be invalidated
140 // while we hold them if we stored the vectors directly.
143 };
144
145 /// Mapping of the values of the current LLVM IR function to the related
146 /// virtual registers and offsets.
147 ValueToVRegInfo VMap;
148
149 // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
150 // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
151 // a mapping between the edges arriving at the BasicBlock to the corresponding
152 // created MachineBasicBlocks. Some BasicBlocks that get translated to a
153 // single MachineBasicBlock may also end up in this Map.
154 using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
156
157 // List of stubbed PHI instructions, for values and basic blocks to be filled
158 // in once all MachineBasicBlocks have been created.
160 PendingPHIs;
161
162 /// Record of what frame index has been allocated to specified allocas for
163 /// this function.
165
166 SwiftErrorValueTracking SwiftError;
167
 168 /// \name Methods for translating from LLVM IR to MachineInstr.
169 /// \see ::translate for general information on the translate methods.
170 /// @{
171
172 /// Translate \p Inst into its corresponding MachineInstr instruction(s).
173 /// Insert the newly translated instruction(s) right where the CurBuilder
174 /// is set.
175 ///
176 /// The general algorithm is:
177 /// 1. Look for a virtual register for each operand or
178 /// create one.
179 /// 2 Update the VMap accordingly.
180 /// 2.alt. For constant arguments, if they are compile time constants,
181 /// produce an immediate in the right operand and do not touch
182 /// ValToReg. Actually we will go with a virtual register for each
183 /// constants because it may be expensive to actually materialize the
184 /// constant. Moreover, if the constant spans on several instructions,
185 /// CSE may not catch them.
186 /// => Update ValToVReg and remember that we saw a constant in Constants.
187 /// We will materialize all the constants in finalize.
188 /// Note: we would need to do something so that we can recognize such operand
189 /// as constants.
190 /// 3. Create the generic instruction.
191 ///
192 /// \return true if the translation succeeded.
193 bool translate(const Instruction &Inst);
194
195 /// Materialize \p C into virtual-register \p Reg. The generic instructions
196 /// performing this materialization will be inserted into the entry block of
197 /// the function.
198 ///
199 /// \return true if the materialization succeeded.
200 bool translate(const Constant &C, Register Reg);
201
202 /// Examine any debug-info attached to the instruction (in the form of
203 /// DbgRecords) and translate it.
204 void translateDbgInfo(const Instruction &Inst,
205 MachineIRBuilder &MIRBuilder);
206
207 /// Translate a debug-info record of a dbg.value into a DBG_* instruction.
208 /// Pass in all the contents of the record, rather than relying on how it's
209 /// stored.
210 void translateDbgValueRecord(Value *V, bool HasArgList,
211 const DILocalVariable *Variable,
212 const DIExpression *Expression, const DebugLoc &DL,
213 MachineIRBuilder &MIRBuilder);
214
215 /// Translate a debug-info record of a dbg.declare into an indirect DBG_*
216 /// instruction. Pass in all the contents of the record, rather than relying
217 /// on how it's stored.
218 void translateDbgDeclareRecord(Value *Address, bool HasArgList,
219 const DILocalVariable *Variable,
220 const DIExpression *Expression, const DebugLoc &DL,
221 MachineIRBuilder &MIRBuilder);
222
223 // Translate U as a copy of V.
224 bool translateCopy(const User &U, const Value &V,
225 MachineIRBuilder &MIRBuilder);
226
227 /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
228 /// emitted.
229 bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
230
231 /// Translate an LLVM load instruction into generic IR.
232 bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
233
234 /// Translate an LLVM store instruction into generic IR.
235 bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
236
237 /// Translate an LLVM string intrinsic (memcpy, memset, ...).
238 bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
239 unsigned Opcode);
240
241 /// Translate an LLVM trap intrinsic (trap, debugtrap, ubsantrap).
242 bool translateTrap(const CallInst &U, MachineIRBuilder &MIRBuilder,
243 unsigned Opcode);
244
245 // Translate @llvm.vector.interleave2 and
246 // @llvm.vector.deinterleave2 intrinsics for fixed-width vector
247 // types into vector shuffles.
248 bool translateVectorInterleave2Intrinsic(const CallInst &CI,
249 MachineIRBuilder &MIRBuilder);
250 bool translateVectorDeinterleave2Intrinsic(const CallInst &CI,
251 MachineIRBuilder &MIRBuilder);
252
253 void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
254
255 bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
256 MachineIRBuilder &MIRBuilder);
257 bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
258 MachineIRBuilder &MIRBuilder);
259
260 /// Helper function for translateSimpleIntrinsic.
261 /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
262 /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
263 /// Intrinsic::not_intrinsic.
264 unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);
265
266 /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
267 /// \return true if the translation succeeded.
268 bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
269 MachineIRBuilder &MIRBuilder);
270
271 bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
272 MachineIRBuilder &MIRBuilder);
273
274 bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
275 MachineIRBuilder &MIRBuilder);
276
277 /// Returns the single livein physical register Arg was lowered to, if
278 /// possible.
279 std::optional<MCRegister> getArgPhysReg(Argument &Arg);
280
281 /// If debug-info targets an Argument and its expression is an EntryValue,
282 /// lower it as either an entry in the MF debug table (dbg.declare), or a
283 /// DBG_VALUE targeting the corresponding livein register for that Argument
284 /// (dbg.value).
285 bool translateIfEntryValueArgument(bool isDeclare, Value *Arg,
286 const DILocalVariable *Var,
287 const DIExpression *Expr,
288 const DebugLoc &DL,
289 MachineIRBuilder &MIRBuilder);
290
291 bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);
292
293 /// Common code for translating normal calls or invokes.
294 bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);
295
296 /// Translate call instruction.
297 /// \pre \p U is a call instruction.
298 bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
299
300 bool translateIntrinsic(
301 const CallBase &CB, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder,
302 ArrayRef<TargetLowering::IntrinsicInfo> TgtMemIntrinsicInfos = {});
303
304 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
305 /// many places it could ultimately go. In the IR, we have a single unwind
306 /// destination, but in the machine CFG, we enumerate all the possible blocks.
307 /// This function skips over imaginary basic blocks that hold catchswitch
308 /// instructions, and finds all the "real" machine
309 /// basic block destinations. As those destinations may not be successors of
310 /// EHPadBB, here we also calculate the edge probability to those
311 /// destinations. The passed-in Prob is the edge probability to EHPadBB.
313 const BasicBlock *EHPadBB, BranchProbability Prob,
314 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
315 &UnwindDests);
316
317 bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
318
319 bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
320 bool translateCallBrIntrinsic(const CallBrInst &I,
321 MachineIRBuilder &MIRBuilder);
322
323 bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
324
325 /// Translate one of LLVM's cast instructions into MachineInstrs, with the
326 /// given generic Opcode.
327 bool translateCast(unsigned Opcode, const User &U,
328 MachineIRBuilder &MIRBuilder);
329
330 /// Translate a phi instruction.
331 bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
332
333 /// Translate a comparison (icmp or fcmp) instruction or constant.
334 bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
335
336 /// Translate an integer compare instruction (or constant).
337 bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
338 return translateCompare(U, MIRBuilder);
339 }
340
341 /// Translate a floating-point compare instruction (or constant).
342 bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
343 return translateCompare(U, MIRBuilder);
344 }
345
346 /// Add remaining operands onto phis we've translated. Executed after all
347 /// MachineBasicBlocks for the function have been created.
348 void finishPendingPhis();
349
350 /// Translate \p Inst into a unary operation \p Opcode.
351 /// \pre \p U is a unary operation.
352 bool translateUnaryOp(unsigned Opcode, const User &U,
353 MachineIRBuilder &MIRBuilder);
354
355 /// Translate \p Inst into a binary operation \p Opcode.
356 /// \pre \p U is a binary operation.
357 bool translateBinaryOp(unsigned Opcode, const User &U,
358 MachineIRBuilder &MIRBuilder);
359
360 /// If the set of cases should be emitted as a series of branches, return
361 /// true. If we should emit this as a bunch of and/or'd together conditions,
362 /// return false.
363 bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
364 /// Helper method for findMergedConditions.
365 /// This function emits a branch and is used at the leaves of an OR or an
366 /// AND operator tree.
367 void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
368 MachineBasicBlock *FBB,
369 MachineBasicBlock *CurBB,
370 MachineBasicBlock *SwitchBB,
371 BranchProbability TProb,
372 BranchProbability FProb, bool InvertCond);
373 /// Used during condbr translation to find trees of conditions that can be
374 /// optimized.
375 void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
376 MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
377 MachineBasicBlock *SwitchBB,
378 Instruction::BinaryOps Opc, BranchProbability TProb,
379 BranchProbability FProb, bool InvertCond);
380
381 /// Translate branch (br) instruction.
382 /// \pre \p U is a branch instruction.
383 bool translateUncondBr(const User &U, MachineIRBuilder &MIRBuilder);
384 bool translateCondBr(const User &U, MachineIRBuilder &MIRBuilder);
385
386 // Begin switch lowering functions.
387 bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
388 SwitchCG::JumpTableHeader &JTH,
389 MachineBasicBlock *HeaderBB);
390 void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);
391
392 void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
393 MachineIRBuilder &MIB);
394
395 /// Generate for the BitTest header block, which precedes each sequence of
396 /// BitTestCases.
397 void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
398 MachineBasicBlock *SwitchMBB);
399 /// Generate code to produces one "bit test" for a given BitTestCase \p B.
400 void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
401 BranchProbability BranchProbToNext, Register Reg,
402 SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
403
404 void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
405 const SwitchCG::SwitchWorkListItem &W, Value *Cond,
406 MachineBasicBlock *SwitchMBB, MachineIRBuilder &MIB);
407
408 bool lowerJumpTableWorkItem(
409 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
410 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
411 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
412 BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
413 MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
414
415 bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
416 MachineBasicBlock *Fallthrough,
417 bool FallthroughUnreachable,
418 BranchProbability UnhandledProbs,
419 MachineBasicBlock *CurMBB,
420 MachineIRBuilder &MIB,
421 MachineBasicBlock *SwitchMBB);
422
423 bool lowerBitTestWorkItem(
424 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
425 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
426 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
427 BranchProbability DefaultProb, BranchProbability UnhandledProbs,
428 SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
429 bool FallthroughUnreachable);
430
431 bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
432 MachineBasicBlock *SwitchMBB,
433 MachineBasicBlock *DefaultMBB,
434 MachineIRBuilder &MIB);
435
436 bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
437 // End switch lowering section.
438
439 bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
440
441 bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
442
443 bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
444
445 bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
446
447 bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
448
449 bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);
450
451 /// Translate return (ret) instruction.
452 /// The target needs to implement CallLowering::lowerReturn for
453 /// this to succeed.
454 /// \pre \p U is a return instruction.
455 bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
456
457 bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);
458
459 bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
460 return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
461 }
462 bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
463 return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
464 }
465 bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
466 return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
467 }
468 bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
469 return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
470 }
471 bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
472 return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
473 }
474 bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
475 return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
476 }
477
478 bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
479 return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
480 }
481 bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
482 return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
483 }
484 bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
485 return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
486 }
487 bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
488 return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
489 }
490 bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
491 return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
492 }
493 bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
494 return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
495 }
496 bool translatePtrToAddr(const User &U, MachineIRBuilder &MIRBuilder) {
497 // FIXME: this is not correct for pointers with addr width != pointer width
498 return translatePtrToInt(U, MIRBuilder);
499 }
500 bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
501 return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
502 }
503 bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
504 return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
505 }
506 bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
507 return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
508 }
509 bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
510 return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
511 }
512 bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
513 return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
514 }
515 bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
516 return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
517 }
518 bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
519 return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
520 }
521 bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder);
522
523 bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
524 return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
525 }
526
527 bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
528 return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
529 }
530
531 bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
532 return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
533 }
534 bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
535 return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
536 }
537 bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
538 return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
539 }
540
541 bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
542 return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
543 }
544 bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
545 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
546 }
547 bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
548 return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
549 }
550 bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
551 return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
552 }
553 bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
554 return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
555 }
556
557 bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);
558
559 bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
560 bool translateInsertVector(const User &U, MachineIRBuilder &MIRBuilder);
561
562 bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
563 bool translateExtractVector(const User &U, MachineIRBuilder &MIRBuilder);
564
565 bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
566
567 bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
568 bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
569 bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
570 bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);
571
572 // Stubs to keep the compiler happy while we implement the rest of the
573 // translation.
 574 bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
 575 return false;
 576 }
 // Stub: EH cleanupret is not yet handled; returning false reports a
 // translation failure for this instruction.
 577 bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
 578 return false;
 579 }
 // Stub: EH catchret is not yet handled.
 580 bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
 581 return false;
 582 }
 // Stub: EH catchswitch is not yet handled.
 583 bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
 584 return false;
 585 }
 // Unlike the surrounding stubs, addrspacecast IS implemented: it lowers to
 // the generic G_ADDRSPACE_CAST opcode via the common cast path.
 586 bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
 587 return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
 588 }
 // Stub: EH cleanuppad is not yet handled.
 589 bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
 590 return false;
 591 }
 // Stub: EH catchpad is not yet handled.
 592 bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
 593 return false;
 594 }
 // Stubs: the reserved UserOp1/UserOp2 opcodes are never expected in valid IR.
 595 bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
 596 return false;
 597 }
 598 bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
 599 return false;
 600 }
601
602 bool translateConvergenceControlIntrinsic(const CallInst &CI,
604 MachineIRBuilder &MIRBuilder);
605
606 /// @}
607
608 // Builder for machine instruction a la IRBuilder.
609 // I.e., compared to regular MIBuilder, this one also inserts the instruction
610 // in the current block, it can creates block, etc., basically a kind of
611 // IRBuilder, but for Machine IR.
612 // CSEMIRBuilder CurBuilder;
613 std::unique_ptr<MachineIRBuilder> CurBuilder;
614
615 // Builder set to the entry block (just after ABI lowering instructions). Used
616 // as a convenient location for Constants.
617 // CSEMIRBuilder EntryBuilder;
618 std::unique_ptr<MachineIRBuilder> EntryBuilder;
619
620 // The MachineFunction currently being translated.
621 MachineFunction *MF = nullptr;
622
623 /// MachineRegisterInfo used to create virtual registers.
624 MachineRegisterInfo *MRI = nullptr;
625
626 const DataLayout *DL = nullptr;
627
628 /// Current target configuration. Controls how the pass handles errors.
629 const TargetPassConfig *TPC = nullptr;
630
631 CodeGenOptLevel OptLevel;
632
633 /// Current optimization remark emitter. Used to report failures.
634 std::unique_ptr<OptimizationRemarkEmitter> ORE;
635
636 AAResults *AA = nullptr;
637 AssumptionCache *AC = nullptr;
638 const TargetLibraryInfo *LibInfo = nullptr;
639 const LibcallLoweringInfo *Libcalls = nullptr;
640 const TargetLowering *TLI = nullptr;
641 FunctionLoweringInfo FuncInfo;
642
643 // True when either the Target Machine specifies no optimizations or the
644 // function has the optnone attribute.
645 bool EnableOpts = false;
646
647 /// True when the block contains a tail call. This allows the IRTranslator to
648 /// stop translating such blocks early.
649 bool HasTailCall = false;
650
651 StackProtectorDescriptor SPDescriptor;
652
653 /// Switch analysis and optimization.
 654 class GISelSwitchLowering : public SwitchCG::SwitchLowering {
 655 public:
 /// \param irt Back-pointer to the owning IRTranslator; must be non-null.
 /// \param funcinfo Shared function-lowering state forwarded to the base.
 656 GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
 657 : SwitchLowering(funcinfo), IRT(irt) {
 658 assert(irt && "irt is null!");
 659 }
 660
 /// Forward CFG-successor registration to the owning IRTranslator so that
 /// switch lowering records edge probabilities through the same path as the
 /// rest of translation.
 661 void addSuccessorWithProb(
 662 MachineBasicBlock *Src, MachineBasicBlock *Dst,
 663 BranchProbability Prob = BranchProbability::getUnknown()) override {
 664 IRT->addSuccessorWithProb(Src, Dst, Prob);
 665 }
 666
 667 ~GISelSwitchLowering() override = default;
 668
 669 private:
 // Non-owning back-pointer; the IRTranslator owns this object (see SL below).
 670 IRTranslator *IRT;
 671 };
672
673 std::unique_ptr<GISelSwitchLowering> SL;
674
675 // * Insert all the code needed to materialize the constants
676 // at the proper place. E.g., Entry block or dominator block
677 // of each constant depending on how fancy we want to be.
678 // * Clear the different maps.
679 void finalizeFunction();
680
681 // Processing steps done per block. E.g. emitting jump tables, stack
682 // protectors etc. Returns true if no errors, false if there was a problem
683 // that caused an abort.
684 bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);
685
686 /// Codegen a new tail for a stack protector check ParentMBB which has had its
687 /// tail spliced into a stack protector check success bb.
688 ///
689 /// For a high level explanation of how this fits into the stack protector
690 /// generation see the comment on the declaration of class
691 /// StackProtectorDescriptor.
692 ///
693 /// \return true if there were no problems.
694 bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
695 MachineBasicBlock *ParentBB);
696
697 /// Codegen the failure basic block for a stack protector check.
698 ///
699 /// A failure stack protector machine basic block consists simply of a call to
700 /// __stack_chk_fail().
701 ///
702 /// For a high level explanation of how this fits into the stack protector
703 /// generation see the comment on the declaration of class
704 /// StackProtectorDescriptor.
705 ///
706 /// \return true if there were no problems.
707 bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
708 MachineBasicBlock *FailureBB);
709
710 /// Get the VRegs that represent \p Val.
711 /// Non-aggregate types have just one corresponding VReg and the list can be
712 /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
713 /// not exist, they are created.
714 ArrayRef<Register> getOrCreateVRegs(const Value &Val);
715
716 Register getOrCreateVReg(const Value &Val) {
717 auto Regs = getOrCreateVRegs(Val);
718 if (Regs.empty())
719 return 0;
720 assert(Regs.size() == 1 &&
721 "attempt to get single VReg for aggregate or void");
722 return Regs[0];
723 }
724
725 Register getOrCreateConvergenceTokenVReg(const Value &Token) {
726 assert(Token.getType()->isTokenTy());
727 auto &Regs = *VMap.getVRegs(Token);
728 if (!Regs.empty()) {
729 assert(Regs.size() == 1 &&
730 "Expected a single register for convergence tokens.");
731 return Regs[0];
732 }
733
734 auto Reg = MRI->createGenericVirtualRegister(LLT::token());
735 Regs.push_back(Reg);
736 auto &Offsets = *VMap.getOffsets(Token);
737 if (Offsets.empty())
738 Offsets.push_back(0);
739 return Reg;
740 }
741
742 /// Allocate some vregs and offsets in the VMap. Then populate just the
743 /// offsets while leaving the vregs empty.
744 ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
745
746 /// Get the frame index that represents \p Val.
747 /// If such VReg does not exist, it is created.
748 int getOrCreateFrameIndex(const AllocaInst &AI);
749
750 /// Get the alignment of the given memory operation instruction. This will
751 /// either be the explicitly specified value or the ABI-required alignment for
752 /// the type being accessed (according to the Module's DataLayout).
753 Align getMemOpAlign(const Instruction &I);
754
755 /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
756 /// returned will be the head of the translated block (suitable for branch
757 /// destinations).
758 MachineBasicBlock &getMBB(const BasicBlock &BB);
759
760 /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
761 /// to `Edge.first` at the IR level. This is used when IRTranslation creates
762 /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
763 /// represented simply by the IR-level CFG.
764 void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);
765
766 /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
767 /// this is just the single MachineBasicBlock corresponding to the predecessor
768 /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
769 /// preceding the original though (e.g. switch instructions).
770 SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
771 auto RemappedEdge = MachinePreds.find(Edge);
772 if (RemappedEdge != MachinePreds.end())
773 return RemappedEdge->second;
774 return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
775 }
776
777 /// Return branch probability calculated by BranchProbabilityInfo for IR
778 /// blocks.
779 BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
780 const MachineBasicBlock *Dst) const;
781
782 void addSuccessorWithProb(
783 MachineBasicBlock *Src, MachineBasicBlock *Dst,
784 BranchProbability Prob = BranchProbability::getUnknown());
785
786public:
788
789 StringRef getPassName() const override { return "IRTranslator"; }
790
791 void getAnalysisUsage(AnalysisUsage &AU) const override;
792
793 // Algo:
794 // CallLowering = MF.subtarget.getCallLowering()
795 // F = MF.getParent()
796 // MIRBuilder.reset(MF)
797 // getMBB(F.getEntryBB())
798 // CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
799 // for each bb in F
800 // getMBB(bb)
801 // for each inst in bb
802 // if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
803 // report_fatal_error("Don't know how to translate input");
804 // finalize()
805 bool runOnMachineFunction(MachineFunction &MF) override;
806};
807
808} // end namespace llvm
809
810#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file defines the BumpPtrAllocator interface.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file defines the DenseMap class.
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineIRBuilder class.
Register Reg
Promote Memory to Register
Definition Mem2Reg.cpp:110
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
std::pair< BasicBlock *, BasicBlock * > Edge
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
This file defines the SmallVector class.
static Value * getStackGuard(const TargetLoweringBase &TLI, const LibcallLoweringInfo &Libcalls, Module *M, IRBuilder<> &B, bool *SupportsSelectionDAGSP=nullptr)
Create a stack guard loading and populate whether SelectionDAG SSP is supported.
an instruction to allocate memory on the stack
Represent the analysis usage information of a pass.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
static BranchProbability getUnknown()
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
Definition Constant.h:43
This is the common base class for constrained floating point intrinsics.
DWARF expression.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
This represents the llvm.dbg.declare instruction.
This represents the llvm.dbg.value instruction.
A debug info location.
Definition DebugLoc.h:123
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition DenseMap.h:75
Class representing an expression and its matching format.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)
StringRef getPassName() const override
getPassName - Return a nice clean name for a pass.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
static constexpr LLT token()
Get a low-level token; just a scalar with zero bits (or no size).
BasicBlockListType::iterator iterator
Helper class to build MachineInstr.
Representation of each machine instruction.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
The optimization diagnostic interface.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A BumpPtrAllocator that allows only elements of a specific type to be allocated.
Definition Allocator.h:390
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
SwitchLowering(FunctionLoweringInfo &funcinfo)
Provides information about what library functions are available for the current target.
Target-Independent Code Generator Pass Configuration Options.
LLVM Value Representation.
Definition Value.h:75
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
Offsets
Offsets in bytes from the start of the input buffer.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >