//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares the IRTranslator pass.
/// This pass is responsible for translating LLVM IR into MachineInstr.
/// It uses target hooks to lower the ABI, but aside from that the code
/// generated by the pass is generic. This is the default translator used for
/// GlobalISel.
///
/// \todo Replace the comments with actual doxygen comments.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H

// Note: the include list was partially lost in extraction; the headers below
// are the ones the declarations in this file clearly depend on.
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CodeGen.h"
#include <memory>
#include <utility>

namespace llvm {

class AllocaInst;
class AssumptionCache;
class BasicBlock;
class CallInst;
class CallLowering;
class Constant;
class ConstrainedFPIntrinsic;
class DataLayout;
class DbgDeclareInst;
class DbgValueInst;
class Instruction;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class OptimizationRemarkEmitter;
class PHINode;
class TargetLibraryInfo;
class TargetPassConfig;
class User;
class Value;

// Technically the pass should run on a hypothetical MachineModule,
// since it should translate Global into some sort of MachineGlobal.
// The MachineGlobal should ultimately just be a transfer of ownership of
// the interesting bits that are relevant to represent a global value.
// That being said, we could investigate what it would cost to just duplicate
// the information from the LLVM IR.
// The idea is that ultimately we would be able to free up the memory used
// by the LLVM IR as soon as the translation is over.
class IRTranslator : public MachineFunctionPass {
public:
  static char ID;

private:
  /// Interface used to lower everything related to calls.
  const CallLowering *CLI = nullptr;

  /// This class contains the mapping from Values to vreg-related data.
  class ValueToVRegInfo {
  public:
    ValueToVRegInfo() = default;

    using VRegListT = SmallVector<Register, 1>;
    using OffsetListT = SmallVector<uint64_t, 1>;

    using const_vreg_iterator =
        DenseMap<const Value *, VRegListT *>::const_iterator;
    using const_offset_iterator =
        DenseMap<const Type *, OffsetListT *>::const_iterator;

    inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }

    VRegListT *getVRegs(const Value &V) {
      auto It = ValToVRegs.find(&V);
      if (It != ValToVRegs.end())
        return It->second;

      return insertVRegs(V);
    }

    OffsetListT *getOffsets(const Value &V) {
      auto It = TypeToOffsets.find(V.getType());
      if (It != TypeToOffsets.end())
        return It->second;

      return insertOffsets(V);
    }

    const_vreg_iterator findVRegs(const Value &V) const {
      return ValToVRegs.find(&V);
    }

    bool contains(const Value &V) const { return ValToVRegs.contains(&V); }

    void reset() {
      ValToVRegs.clear();
      TypeToOffsets.clear();
      VRegAlloc.DestroyAll();
      OffsetAlloc.DestroyAll();
    }

  private:
    VRegListT *insertVRegs(const Value &V) {
      assert(!ValToVRegs.contains(&V) && "Value already exists");

      // We placement-new using our fast allocator since we never try to free
      // the vectors until translation is finished.
      auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
      ValToVRegs[&V] = VRegList;
      return VRegList;
    }

    OffsetListT *insertOffsets(const Value &V) {
      assert(!TypeToOffsets.contains(V.getType()) && "Type already exists");

      auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
      TypeToOffsets[V.getType()] = OffsetList;
      return OffsetList;
    }

    SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
    SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;

    // We store pointers to vectors here since references may be invalidated
    // while we hold them if we stored the vectors directly.
    DenseMap<const Value *, VRegListT *> ValToVRegs;
    DenseMap<const Type *, OffsetListT *> TypeToOffsets;
  };

  /// Mapping of the values of the current LLVM IR function to the related
  /// virtual registers and offsets.
  ValueToVRegInfo VMap;
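  // Illustrative sketch (not normative): a scalar like `i32 %x` maps to a
  // single vreg with a single offset of 0, while an aggregate such as
  // `{ i64, i32 } %s` is flattened into one vreg per leaf element plus a
  // parallel offset list recording where each piece sits inside the
  // aggregate.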

  // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
  // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
  // a mapping from the edges arriving at the BasicBlock to the corresponding
  // created MachineBasicBlocks. Some BasicBlocks that get translated to a
  // single MachineBasicBlock may also end up in this Map.
  using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
  DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;

  // List of stubbed PHI instructions, for values and basic blocks to be filled
  // in once all MachineBasicBlocks have been created.
  SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
      PendingPHIs;

  /// Record of what frame index has been allocated to specified allocas for
  /// this function.
  DenseMap<const AllocaInst *, int> FrameIndices;

  SwiftErrorValueTracking SwiftError;

  /// \name Methods for translating from LLVM IR to MachineInstr.
  /// \see ::translate for general information on the translate methods.
  /// @{

  /// Translate \p Inst into its corresponding MachineInstr instruction(s).
  /// Insert the newly translated instruction(s) right where the CurBuilder
  /// is set.
  ///
  /// The general algorithm is:
  /// 1. Look for a virtual register for each operand or
  ///    create one.
  /// 2. Update the VMap accordingly.
  /// 2.alt. For constant arguments, if they are compile time constants,
  ///    produce an immediate in the right operand and do not touch
  ///    ValToReg. Actually we will go with a virtual register for each
  ///    constant because it may be expensive to actually materialize the
  ///    constant. Moreover, if the constant spans several instructions,
  ///    CSE may not catch them.
  ///    => Update ValToVReg and remember that we saw a constant in Constants.
  ///    We will materialize all the constants in finalize.
  /// Note: we would need to do something so that we can recognize such operand
  /// as constants.
  /// 3. Create the generic instruction.
  ///
  /// \return true if the translation succeeded.
  bool translate(const Instruction &Inst);
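  // Illustrative sketch (not normative): translating `%d = add i32 %a, %b`
  // looks up (or creates) the s32 vregs for %a and %b in VMap and emits a
  // generic `%vd:_(s32) = G_ADD %va, %vb` at the current insertion point,
  // recording %vd as the vreg for %d.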

  /// Materialize \p C into virtual-register \p Reg. The generic instructions
  /// performing this materialization will be inserted into the entry block of
  /// the function.
  ///
  /// \return true if the materialization succeeded.
  bool translate(const Constant &C, Register Reg);

  /// Examine any debug-info attached to the instruction (in the form of
  /// DbgRecords) and translate it.
  void translateDbgInfo(const Instruction &Inst,
                        MachineIRBuilder &MIRBuilder);

  /// Translate a debug-info record of a dbg.value into a DBG_* instruction.
  /// Pass in all the contents of the record, rather than relying on how it's
  /// stored.
  void translateDbgValueRecord(Value *V, bool HasArgList,
                               const DILocalVariable *Variable,
                               const DIExpression *Expression,
                               const DebugLoc &DL,
                               MachineIRBuilder &MIRBuilder);

  /// Translate a debug-info record of a dbg.declare into an indirect DBG_*
  /// instruction. Pass in all the contents of the record, rather than relying
  /// on how it's stored.
  void translateDbgDeclareRecord(Value *Address, bool HasArgList,
                                 const DILocalVariable *Variable,
                                 const DIExpression *Expression,
                                 const DebugLoc &DL,
                                 MachineIRBuilder &MIRBuilder);

  // Translate U as a copy of V.
  bool translateCopy(const User &U, const Value &V,
                     MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
  /// emitted.
  bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM load instruction into generic IR.
  bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM store instruction into generic IR.
  bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM memory intrinsic (memcpy, memset, ...).
  bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
                        unsigned Opcode);
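  // For reference (hedged sketch): \p Opcode here is one of the generic memory
  // opcodes such as G_MEMCPY, G_MEMMOVE or G_MEMSET, chosen by the caller
  // based on the intrinsic being translated.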

  /// Translate an LLVM trap intrinsic (trap, debugtrap, ubsantrap).
  bool translateTrap(const CallInst &U, MachineIRBuilder &MIRBuilder,
                     unsigned Opcode);

  // Translate @llvm.vector.interleave2 and
  // @llvm.vector.deinterleave2 intrinsics for fixed-width vector
  // types into vector shuffles.
  bool translateVectorInterleave2Intrinsic(const CallInst &CI,
                                           MachineIRBuilder &MIRBuilder);
  bool translateVectorDeinterleave2Intrinsic(const CallInst &CI,
                                             MachineIRBuilder &MIRBuilder);

  void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);

  bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                  MachineIRBuilder &MIRBuilder);
  bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder);

  /// Helper function for translateSimpleIntrinsic.
  /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
  /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
  /// Intrinsic::not_intrinsic.
  unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);
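  // For example (illustrative, not exhaustive): Intrinsic::fabs maps to
  // TargetOpcode::G_FABS and Intrinsic::ceil to TargetOpcode::G_FCEIL.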

  /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
  /// \return true if the translation succeeded.
  bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                MachineIRBuilder &MIRBuilder);

  bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
                                       MachineIRBuilder &MIRBuilder);

  bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                               MachineIRBuilder &MIRBuilder);

  /// Returns the single livein physical register Arg was lowered to, if
  /// possible.
  std::optional<MCRegister> getArgPhysReg(Argument &Arg);

  /// If debug-info targets an Argument and its expression is an EntryValue,
  /// lower it as either an entry in the MF debug table (dbg.declare), or a
  /// DBG_VALUE targeting the corresponding livein register for that Argument
  /// (dbg.value).
  bool translateIfEntryValueArgument(bool isDeclare, Value *Arg,
                                     const DILocalVariable *Var,
                                     const DIExpression *Expr,
                                     const DebugLoc &DL,
                                     MachineIRBuilder &MIRBuilder);

  bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Common code for translating normal calls or invokes.
  bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Translate call instruction.
  /// \pre \p U is a call instruction.
  bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateIntrinsic(
      const CallBase &CB, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder,
      const TargetLowering::IntrinsicInfo *TgtMemIntrinsicInfo = nullptr);

  /// When an invoke or a cleanupret unwinds to the next EH pad, there are
  /// many places it could ultimately go. In the IR, we have a single unwind
  /// destination, but in the machine CFG, we enumerate all the possible blocks.
  /// This function skips over imaginary basic blocks that hold catchswitch
  /// instructions, and finds all the "real" machine
  /// basic block destinations. As those destinations may not be successors of
  /// EHPadBB, here we also calculate the edge probability to those
  /// destinations. The passed-in Prob is the edge probability to EHPadBB.
  bool findUnwindDestinations(
      const BasicBlock *EHPadBB, BranchProbability Prob,
      SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
          &UnwindDests);

  bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate one of LLVM's cast instructions into MachineInstrs, with the
  /// given generic Opcode.
  bool translateCast(unsigned Opcode, const User &U,
                     MachineIRBuilder &MIRBuilder);

  /// Translate a phi instruction.
  bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
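  // Note (summary of the flow, not a contract): translatePHI only creates the
  // G_PHI definitions and records them in PendingPHIs; the incoming
  // (value, predecessor MBB) operands are filled in later by
  // finishPendingPhis(), once every MachineBasicBlock exists.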

  /// Translate a comparison (icmp or fcmp) instruction or constant.
  bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an integer compare instruction (or constant).
  bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Translate a floating-point compare instruction (or constant).
  bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Add remaining operands onto phis we've translated. Executed after all
  /// MachineBasicBlocks for the function have been created.
  void finishPendingPhis();

  /// Translate \p U into a unary operation \p Opcode.
  /// \pre \p U is a unary operation.
  bool translateUnaryOp(unsigned Opcode, const User &U,
                        MachineIRBuilder &MIRBuilder);

  /// Translate \p U into a binary operation \p Opcode.
  /// \pre \p U is a binary operation.
  bool translateBinaryOp(unsigned Opcode, const User &U,
                         MachineIRBuilder &MIRBuilder);

  /// If the set of cases should be emitted as a series of branches, return
  /// true. If we should emit this as a bunch of and/or'd together conditions,
  /// return false.
  bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
  /// Helper method for findMergedConditions.
  /// This function emits a branch and is used at the leaves of an OR or an
  /// AND operator tree.
  void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);
  /// Used during condbr translation to find trees of conditions that can be
  /// optimized.
  void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);

  /// Translate branch (br) instruction.
  /// \pre \p U is a branch instruction.
  bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);

  // Begin switch lowering functions.
  bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
                           SwitchCG::JumpTableHeader &JTH,
                           MachineBasicBlock *HeaderBB);
  void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);

  void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
                      MachineIRBuilder &MIB);

  /// Generate code for the BitTest header block, which precedes each sequence
  /// of BitTestCases.
  void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
                         MachineBasicBlock *SwitchMBB);
  /// Generate code to produce one "bit test" for a given BitTestCase \p B.
  void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                       BranchProbability BranchProbToNext, Register Reg,
                       SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);

  void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                     const SwitchCG::SwitchWorkListItem &W, Value *Cond,
                     MachineBasicBlock *SwitchMBB, MachineIRBuilder &MIB);

  bool lowerJumpTableWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
      MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);

  bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
                                MachineBasicBlock *Fallthrough,
                                bool FallthroughUnreachable,
                                BranchProbability UnhandledProbs,
                                MachineBasicBlock *CurMBB,
                                MachineIRBuilder &MIB,
                                MachineBasicBlock *SwitchMBB);

  bool lowerBitTestWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability DefaultProb, BranchProbability UnhandledProbs,
      SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
      bool FallthroughUnreachable);

  bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                           MachineBasicBlock *SwitchMBB,
                           MachineBasicBlock *DefaultMBB,
                           MachineIRBuilder &MIB);

  bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
  // End switch lowering section.

  bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
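  // Sketch of the usual lowering (details vary by address space and index
  // type): a getelementptr becomes pointer arithmetic built from G_PTR_ADD,
  // with variable indices scaled by the element size (e.g. via G_MUL) and
  // constant indices folded into immediate offsets.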

  bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate return (ret) instruction.
  /// The target needs to implement CallLowering::lowerReturn for
  /// this to succeed.
  /// \pre \p U is a return instruction.
  bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
  }
  bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
  }
  bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
  }
  bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
  }
  bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
  }
  bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
  }

  bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
  }
  bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
  }
  bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
  }
  bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
  }
  bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
  }
  bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
  }
  bool translatePtrToAddr(const User &U, MachineIRBuilder &MIRBuilder) {
    // FIXME: this is not correct for pointers with addr width != pointer width
    return translatePtrToInt(U, MIRBuilder);
  }
  bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
  }
  bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
  }
  bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
  }
  bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
  }
  bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
  }
  bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
  }
  bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
  }
  bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
  }

  bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
  }

  bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
  }
  bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
  }
  bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
  }

  bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
  }
  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
  }
  bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
  }
  bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
  }
  bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
  }

  bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateInsertVector(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateExtractVector(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);

  // Stubs to keep the compiler happy while we implement the rest of the
  // translation.
  bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
  }
  bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }

  bool translateConvergenceControlIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder);

  /// @}

  // Builder for machine instructions a la IRBuilder.
  // I.e., compared to regular MIBuilder, this one also inserts the instruction
  // in the current block, it can create blocks, etc., basically a kind of
  // IRBuilder, but for Machine IR.
  // CSEMIRBuilder CurBuilder;
  std::unique_ptr<MachineIRBuilder> CurBuilder;

  // Builder set to the entry block (just after ABI lowering instructions). Used
  // as a convenient location for Constants.
  // CSEMIRBuilder EntryBuilder;
  std::unique_ptr<MachineIRBuilder> EntryBuilder;

  // The MachineFunction currently being translated.
  MachineFunction *MF = nullptr;

  /// MachineRegisterInfo used to create virtual registers.
  MachineRegisterInfo *MRI = nullptr;

  const DataLayout *DL = nullptr;

  /// Current target configuration. Controls how the pass handles errors.
  const TargetPassConfig *TPC = nullptr;

  CodeGenOptLevel OptLevel;

  /// Current optimization remark emitter. Used to report failures.
  std::unique_ptr<OptimizationRemarkEmitter> ORE;

  AAResults *AA = nullptr;
  AssumptionCache *AC = nullptr;
  const TargetLibraryInfo *LibInfo = nullptr;
  const TargetLowering *TLI = nullptr;
  FunctionLoweringInfo FuncInfo;

  // False when either the TargetMachine specifies no optimizations or the
  // function has the optnone attribute; true when optimizations are enabled.
  bool EnableOpts = false;

  /// True when the block contains a tail call. This allows the IRTranslator to
  /// stop translating such blocks early.
  bool HasTailCall = false;

  StackProtectorDescriptor SPDescriptor;

  /// Switch analysis and optimization.
  class GISelSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
        : SwitchLowering(funcinfo), IRT(irt) {
      assert(irt && "irt is null!");
    }

    void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      IRT->addSuccessorWithProb(Src, Dst, Prob);
    }

    ~GISelSwitchLowering() override = default;

  private:
    IRTranslator *IRT;
  };

  std::unique_ptr<GISelSwitchLowering> SL;

  // * Insert all the code needed to materialize the constants
  //   at the proper place. E.g., Entry block or dominator block
  //   of each constant depending on how fancy we want to be.
  // * Clear the different maps.
  void finalizeFunction();

  // Processing steps done per block. E.g. emitting jump tables, stack
  // protectors etc. Returns true if no errors, false if there was a problem
  // that caused an abort.
  bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);

  /// Codegen a new tail for a stack protector check ParentMBB which has had its
  /// tail spliced into a stack protector check success bb.
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                              MachineBasicBlock *ParentBB);

  /// Codegen the failure basic block for a stack protector check.
  ///
  /// A failure stack protector machine basic block consists simply of a call to
  /// __stack_chk_fail().
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *FailureBB);

  /// Get the VRegs that represent \p Val.
  /// Non-aggregate types have just one corresponding VReg and the list can be
  /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
  /// not exist, they are created.
  ArrayRef<Register> getOrCreateVRegs(const Value &Val);

  Register getOrCreateVReg(const Value &Val) {
    auto Regs = getOrCreateVRegs(Val);
    if (Regs.empty())
      return 0;
    assert(Regs.size() == 1 &&
           "attempt to get single VReg for aggregate or void");
    return Regs[0];
  }

  Register getOrCreateConvergenceTokenVReg(const Value &Token) {
    assert(Token.getType()->isTokenTy());
    auto &Regs = *VMap.getVRegs(Token);
    if (!Regs.empty()) {
      assert(Regs.size() == 1 &&
             "Expected a single register for convergence tokens.");
      return Regs[0];
    }

    auto Reg = MRI->createGenericVirtualRegister(LLT::token());
    Regs.push_back(Reg);
    auto &Offsets = *VMap.getOffsets(Token);
    if (Offsets.empty())
      Offsets.push_back(0);
    return Reg;
  }

  /// Allocate some vregs and offsets in the VMap. Then populate just the
  /// offsets while leaving the vregs empty.
  ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);

  /// Get the frame index that represents \p AI.
  /// If no frame index has been allocated for \p AI yet, it is created.
  int getOrCreateFrameIndex(const AllocaInst &AI);
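  // Note (summary, hedged): static allocas translated by translateAlloca are
  // assigned a fixed frame index (materialized with G_FRAME_INDEX), while
  // dynamically sized allocas are lowered separately, e.g. via
  // G_DYN_STACKALLOC.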

  /// Get the alignment of the given memory operation instruction. This will
  /// either be the explicitly specified value or the ABI-required alignment for
  /// the type being accessed (according to the Module's DataLayout).
  Align getMemOpAlign(const Instruction &I);

  /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
  /// returned will be the head of the translated block (suitable for branch
  /// destinations).
  MachineBasicBlock &getMBB(const BasicBlock &BB);

  /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
  /// to `Edge.first` at the IR level. This is used when IRTranslation creates
  /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
  /// represented simply by the IR-level CFG.
  void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);

  /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
  /// this is just the single MachineBasicBlock corresponding to the predecessor
  /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
  /// preceding the original though (e.g. switch instructions).
  SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
    auto RemappedEdge = MachinePreds.find(Edge);
    if (RemappedEdge != MachinePreds.end())
      return RemappedEdge->second;
    return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
  }

  /// Return branch probability calculated by BranchProbabilityInfo for IR
  /// blocks.
  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;

  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  IRTranslator(CodeGenOptLevel OptLevel = CodeGenOptLevel::None);

  StringRef getPassName() const override { return "IRTranslator"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  // Algo:
  //   CallLowering = MF.subtarget.getCallLowering()
  //   F = MF.getParent()
  //   MIRBuilder.reset(MF)
  //   getMBB(F.getEntryBB())
  //   CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
  //   for each bb in F
  //     getMBB(bb)
  //     for each inst in bb
  //       if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
  //         report_fatal_error("Don't know how to translate input");
  //   finalize()
  bool runOnMachineFunction(MachineFunction &MF) override;
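  // Typical use (sketch; MyTargetPassConfig is a placeholder name): a target
  // enables GlobalISel by scheduling this pass from its TargetPassConfig, e.g.
  //   bool MyTargetPassConfig::addIRTranslator() {
  //     addPass(new IRTranslator(getOptLevel()));
  //     return false;
  //   }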
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H